diff --git a/[refs] b/[refs]
index 52f5fbc25ac0..68d9ce35665a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: afd290945cd283030b51b433a66fe57a8feb28c8
+refs/heads/master: e1616300a20c80396109c1cf013ba9a36055a3da
diff --git a/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml b/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
index c7a4ca517859..ffee1fbbc001 100644
--- a/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ b/trunk/Documentation/DocBook/media/dvb/dvbproperty.xml
@@ -163,16 +163,14 @@ get/set up to 64 properties. The actual meaning of each property is described on
DTV_FREQUENCY
- Central frequency of the channel.
+ Central frequency of the channel, in Hz.
Notes:
- 1)For satellital delivery systems, it is measured in kHz.
- For the other ones, it is measured in Hz.
- 2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+ 1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
E.g. a valid frequency could be 474143 kHz. The stepping is bound to the bandwidth of
the channel which is 6MHz.
- 3)As in ISDB-Tsb the channel consists of only one or three segments the
+ 2)As in ISDB-Tsb the channel consists of only one or three segments the
frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
central frequency of the channel is expected.
@@ -737,10 +735,14 @@ typedef enum fe_hierarchy {
DTV_TUNE
DTV_CLEAR
DTV_FREQUENCY
+ DTV_MODULATION
DTV_BANDWIDTH_HZ
DTV_INVERSION
+ DTV_CODE_RATE_HP
+ DTV_CODE_RATE_LP
DTV_GUARD_INTERVAL
DTV_TRANSMISSION_MODE
+ DTV_HIERARCHY
DTV_ISDBT_LAYER_ENABLED
DTV_ISDBT_PARTIAL_RECEPTION
DTV_ISDBT_SOUND_BROADCASTING
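
For context, user space sets the properties listed above by filling an array of
struct dtv_property entries and passing it to the FE_SET_PROPERTY ioctl on the
frontend device node. A minimal sketch follows; the adapter path and the DVB-T
tuning values are illustrative assumptions, not values taken from this document.

    /* Sketch: tune a frontend through the DTV property API.
     * Device path and tuning values are hypothetical examples. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    int main(void)
    {
            struct dtv_property props[] = {
                    { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT },
                    { .cmd = DTV_FREQUENCY,       .u.data = 474000000 }, /* Hz */
                    { .cmd = DTV_BANDWIDTH_HZ,    .u.data = 6000000 },
                    { .cmd = DTV_TUNE },          /* commit the parameters */
            };
            struct dtv_properties cmdseq = {
                    .num = sizeof(props) / sizeof(props[0]),
                    .props = props,
            };
            int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

            if (fd < 0 || ioctl(fd, FE_SET_PROPERTY, &cmdseq) < 0) {
                    perror("FE_SET_PROPERTY");
                    return 1;
            }
            return 0;
    }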
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index b17a7aac6997..6f1f9a629dc3 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -183,12 +183,7 @@ applications must set the array to zero.
__u32
ctrl_class
The control class to which all controls belong, see
-. Drivers that use a kernel framework for handling
-controls will also accept a value of 0 here, meaning that the controls can
-belong to any control class. Whether drivers support this can be tested by setting
-ctrl_class to 0 and calling VIDIOC_TRY_EXT_CTRLS
-with a count of 0. If that succeeds, then the driver
-supports this feature.
+.
__u32
@@ -199,13 +194,10 @@ also be zero.
__u32
error_idx
- Set by the driver in case of an error. If it is equal
-to count, then no actual changes were made to
-controls. In other words, the error was not associated with setting a particular
-control. If it is another value, then only the controls up to error_idx-1
-were modified and control error_idx is the one that
-caused the error. The error_idx value is undefined
-if the ioctl returned 0 (success).
+ Set by the driver in case of an error. It is the
+index of the control causing the error or equal to 'count' when the
+error is not associated with a particular control. Undefined when the
+ioctl returns 0 (success).
__u32
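
For context, error_idx is what an application inspects after a failed
VIDIOC_S_EXT_CTRLS (or G/TRY) call to tell whether a particular control caused
the failure. A minimal user-space sketch; the device path and the brightness
control are assumptions chosen only for illustration.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_ext_control ctrl;
            struct v4l2_ext_controls ctrls;
            int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */

            if (fd < 0)
                    return 1;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_BRIGHTNESS;
            ctrl.value = 128;

            memset(&ctrls, 0, sizeof(ctrls));
            ctrls.ctrl_class = V4L2_CTRL_CLASS_USER;
            ctrls.count = 1;
            ctrls.controls = &ctrl;

            if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
                    if (ctrls.error_idx == ctrls.count)
                            fprintf(stderr, "error not tied to a specific control\n");
                    else
                            fprintf(stderr, "control at index %u caused the error\n",
                                    ctrls.error_idx);
                    return 1;
            }
            return 0;
    }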
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 7c63815e7afd..93817f337033 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
@@ -364,20 +364,15 @@ capability and it is cleared otherwise.
V4L2_FBUF_FLAG_OVERLAY
0x0002
- If this flag is set for a video capture device, then the
-driver will set the initial overlay size to cover the full framebuffer size,
-otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
-
-Only one video capture driver (bttv) supports this flag. The use of this flag
-for capture devices is deprecated. There is no way to detect which drivers
-support this flag, so the only reliable method of setting the overlay size is
-through &VIDIOC-S-FMT;.
-
-If this flag is set for a video output device, then the video output overlay
-window is relative to the top-left corner of the framebuffer and restricted
-to the size of the framebuffer. If it is cleared, then the video output
-overlay window is relative to the video output display.
-
+ The frame buffer is an overlay surface the same
+size as the capture. [?]
+
+
+ The purpose of
+V4L2_FBUF_FLAG_OVERLAY was never quite clear.
+Most drivers seem to ignore this flag. For compatibility with the
+bttv driver applications should set the
+V4L2_FBUF_FLAG_OVERLAY flag.
V4L2_FBUF_FLAG_CHROMAKEY
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 66e9a5257861..16431813bebd 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -98,11 +98,8 @@ the &v4l2-output; modulator field and the
&v4l2-tuner-type;
type
The tuner type. This is the same value as in the
-&v4l2-tuner; type field. The type must be set
-to V4L2_TUNER_RADIO for /dev/radioX
-device nodes, and to V4L2_TUNER_ANALOG_TV
-for all others. The field is not applicable to modulators, &ie; ignored
-by drivers.
+&v4l2-tuner; type field. The field is not
+applicable to modulators, &ie; ignored by drivers.
__u32
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
index 1d43065090dd..08ae82f131f2 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-input.xml
@@ -61,8 +61,8 @@ desired input in an integer and call the
VIDIOC_S_INPUT ioctl with a pointer to this
integer. Side effects are possible. For example inputs may support
different video standards, so the driver may implicitly switch the
-current standard. Because of these possible side effects applications
-must select an input before querying or negotiating any other parameters.
+current standard. It is good practice to select an input before
+querying or negotiating any other parameters.
Information about video inputs is available using the
&VIDIOC-ENUMINPUT; ioctl.
diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
index 4533068ecb8a..fd45f1c13ccf 100644
--- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
+++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-output.xml
@@ -61,9 +61,8 @@ desired output in an integer and call the
VIDIOC_S_OUTPUT ioctl with a pointer to this integer.
Side effects are possible. For example outputs may support different
video standards, so the driver may implicitly switch the current
-standard.
-standard. Because of these possible side effects applications
-must select an output before querying or negotiating any other parameters.
+standard. It is good practice to select an output before querying or
+negotiating any other parameters.
Information about video outputs is available using the
&VIDIOC-ENUMOUTPUT; ioctl.
diff --git a/trunk/Documentation/acpi/apei/einj.txt b/trunk/Documentation/acpi/apei/einj.txt
index e7cc36397217..5cc699ba5453 100644
--- a/trunk/Documentation/acpi/apei/einj.txt
+++ b/trunk/Documentation/acpi/apei/einj.txt
@@ -47,53 +47,20 @@ directory apei/einj. The following files are provided.
- param1
This file is used to set the first error parameter value. Effect of
- parameter depends on error_type specified.
+ parameter depends on error_type specified. For memory error, this is
+ physical memory address. Only available if param_extension module
+ parameter is specified.
- param2
This file is used to set the second error parameter value. Effect of
- parameter depends on error_type specified.
-
-BIOS versions based in the ACPI 4.0 specification have limited options
-to control where the errors are injected. Your BIOS may support an
-extension (enabled with the param_extension=1 module parameter, or
-boot command line einj.param_extension=1). This allows the address
-and mask for memory injections to be specified by the param1 and
-param2 files in apei/einj.
-
-BIOS versions using the ACPI 5.0 specification have more control over
-the target of the injection. For processor related errors (type 0x1,
-0x2 and 0x4) the APICID of the target should be provided using the
-param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20)
-the address is set using param1 with a mask in param2 (0x0 is equivalent
-to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the
-segment, bus, device and function are specified using param1:
-
- 31 24 23 16 15 11 10 8 7 0
- +-------------------------------------------------+
- | segment | bus | device | function | reserved |
- +-------------------------------------------------+
-
-An ACPI 5.0 BIOS may also allow vendor specific errors to be injected.
-In this case a file named vendor will contain identifying information
-from the BIOS that hopefully will allow an application wishing to use
-the vendor specific extension to tell that they are running on a BIOS
-that supports it. All vendor extensions have the 0x80000000 bit set in
-error_type. A file vendor_flags controls the interpretation of param1
-and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor
-documentation for details (and expect changes to this API if vendors
-creativity in using this feature expands beyond our expectations).
-
-Example:
-# cd /sys/kernel/debug/apei/einj
-# cat available_error_type # See which errors can be injected
-0x00000002 Processor Uncorrectable non-fatal
-0x00000008 Memory Correctable
-0x00000010 Memory Uncorrectable non-fatal
-# echo 0x12345000 > param1 # Set memory address for injection
-# echo 0xfffffffffffff000 > param2 # Mask - anywhere in this page
-# echo 0x8 > error_type # Choose correctable memory error
-# echo 1 > error_inject # Inject now
+ parameter depends on error_type specified. For memory error, this is
+ physical memory address mask. Only available if param_extension
+ module parameter is specified.
+Injecting parameters is a BIOS-version-specific extension, that is, it
+only works with some BIOS versions. If you want to use it, please make
+sure your BIOS version has the proper support and specify
+"param_extension=y" as a module parameter.
For more information about EINJ, please refer to ACPI specification
-version 4.0, section 17.5 and ACPI 5.0, section 18.6.
+version 4.0, section 17.5.
diff --git a/trunk/Documentation/devices.txt b/trunk/Documentation/devices.txt
index 00383186d8fb..cec8864ce4e8 100644
--- a/trunk/Documentation/devices.txt
+++ b/trunk/Documentation/devices.txt
@@ -447,9 +447,6 @@ Your cooperation is appreciated.
234 = /dev/btrfs-control Btrfs control device
235 = /dev/autofs Autofs control device
236 = /dev/mapper/control Device-Mapper control device
- 237 = /dev/loop-control Loopback control device
- 238 = /dev/vhost-net Host kernel accelerator for virtio net
-
240-254 Reserved for local use
255 Reserved for MISC_DYNAMIC_MINOR
diff --git a/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt b/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt
deleted file mode 100644
index 3c046ee6e8b5..000000000000
--- a/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-* Atmel Direct Memory Access Controller (DMA)
-
-Required properties:
-- compatible: Should be "atmel,-dma"
-- reg: Should contain DMA registers location and length
-- interrupts: Should contain DMA interrupt
-
-Examples:
-
-dma@ffffec00 {
- compatible = "atmel,at91sam9g45-dma";
- reg = <0xffffec00 0x200>;
- interrupts = <21>;
-};
diff --git a/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt b/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt
deleted file mode 100644
index 56564aa4b444..000000000000
--- a/trunk/Documentation/devicetree/bindings/i2c/omap-i2c.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-I2C for OMAP platforms
-
-Required properties :
-- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
-- ti,hwmods : Must be "i2c", n being the instance number (1-based)
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Recommended properties :
-- clock-frequency : Desired I2C bus clock frequency in Hz. Otherwise
- the default 100 kHz frequency will be used.
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-
-Note: Current implementation will fetch base address, irq and dma
-from omap hwmod data base during device registration.
-Future plan is to migrate hwmod data base contents into device tree
-blob so that, all the required data will be used from device tree dts
-file.
-
-Examples :
-
-i2c1: i2c@0 {
- compatible = "ti,omap3-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- ti,hwmods = "i2c1";
- clock-frequency = <400000>;
-};
diff --git a/trunk/Documentation/dmaengine.txt b/trunk/Documentation/dmaengine.txt
index bbe6cb3d1856..94b7e0f96b38 100644
--- a/trunk/Documentation/dmaengine.txt
+++ b/trunk/Documentation/dmaengine.txt
@@ -75,10 +75,6 @@ The slave DMA usage consists of following steps:
slave_sg - DMA a list of scatter gather buffers from/to a peripheral
dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
operation is explicitly stopped.
- interleaved_dma - This is common to Slave as well as M2M clients. For slave
- address of devices' fifo could be already known to the driver.
- Various types of operations could be expressed by setting
- appropriate values to the 'dma_interleaved_template' members.
A non-NULL return of this transfer API represents a "descriptor" for
the given transaction.
@@ -93,10 +89,6 @@ The slave DMA usage consists of following steps:
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
- struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags);
-
The peripheral driver is expected to have mapped the scatterlist for
the DMA operation prior to calling device_prep_slave_sg, and must
keep the scatterlist mapped until the DMA operation has completed.
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 1bea46a54b1c..d725c0dfe032 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -439,6 +439,17 @@ Who: Jean Delvare
----------------------------
+What: For VIDIOC_S_FREQUENCY the type field must match the device node's type.
+ If not, return -EINVAL.
+When: 3.2
+Why: It makes no sense to switch the tuner to radio mode by calling
+ VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
+ calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
+ move to more consistent handling of tv and radio tuners.
+Who: Hans Verkuil
+
+----------------------------
+
What: Opening a radio device node will no longer automatically switch the
tuner mode from tv to radio.
When: 3.3
diff --git a/trunk/Documentation/ioctl/ioctl-number.txt b/trunk/Documentation/ioctl/ioctl-number.txt
index 4840334ea97b..54078ed96b37 100644
--- a/trunk/Documentation/ioctl/ioctl-number.txt
+++ b/trunk/Documentation/ioctl/ioctl-number.txt
@@ -149,7 +149,6 @@ Code Seq#(hex) Include File Comments
'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h
'M' 00-0F drivers/video/fsl-diu-fb.h conflict!
'N' 00-1F drivers/usb/scanner.h
-'N' 40-7F drivers/block/nvme.c
'O' 00-06 mtd/ubi-user.h UBI
'P' all linux/soundcard.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index 033d4e69b43b..b29f3c416296 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -1059,11 +1059,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
-
- intel_idle.max_cstate= [KNL,HW,ACPI,X86]
- 0 disables intel_idle and fall back on acpi_idle.
- 1 to 6 specify maximum depth of C-state.
-
intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
off disable Interrupt Remapping
diff --git a/trunk/Documentation/scsi/ChangeLog.megaraid_sas b/trunk/Documentation/scsi/ChangeLog.megaraid_sas
index 57566bacb4c5..64adb98b181c 100644
--- a/trunk/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/trunk/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,13 +1,3 @@
-Release Date : Fri. Jan 6, 2012 17:00:00 PST 2010 -
- (emaild-id:megaraidlinux@lsi.com)
- Adam Radford
-Current Version : 00.00.06.14-rc1
-Old Version : 00.00.06.12-rc1
- 1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
- 2. Mask off flags in ioctl path to prevent memory scribble with older
- MegaCLI versions.
- 3. Remove poll_mode_io module paramater, sysfs node, and associated code.
--------------------------------------------------------------------------------
Release Date : Wed. Oct 5, 2011 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/trunk/Documentation/scsi/LICENSE.qla4xxx b/trunk/Documentation/scsi/LICENSE.qla4xxx
index ab899591ecb7..494980e40491 100644
--- a/trunk/Documentation/scsi/LICENSE.qla4xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla4xxx
@@ -1,11 +1,32 @@
Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI Driver
+QLogic Linux iSCSI HBA Driver
This program includes a device driver for Linux 3.x.
You may modify and redistribute the device driver code under the
GNU General Public License (a copy of which is attached hereto as
Exhibit A) published by the Free Software Foundation (version 2).
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
EXHIBIT A
diff --git a/trunk/Documentation/target/tcm_mod_builder.py b/trunk/Documentation/target/tcm_mod_builder.py
index 6e21b8b52638..7ef9b843d529 100755
--- a/trunk/Documentation/target/tcm_mod_builder.py
+++ b/trunk/Documentation/target/tcm_mod_builder.py
@@ -230,9 +230,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include \n"
buf += "#include \n\n"
buf += "#include \n"
- buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -255,7 +260,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
- buf += " if (!se_nacl_new)\n"
+ buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
@@ -303,7 +308,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
- buf += " if (!tpg) {\n"
+ buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@@ -339,7 +344,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
- buf += " if (!" + fabric_mod_port + ") {\n"
+ buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
@@ -347,7 +352,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
- buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
+ buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@@ -386,7 +391,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
- buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
+ buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
+ buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
@@ -399,12 +405,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
+ buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+ buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@@ -431,9 +439,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
- buf += " if (IS_ERR(fabric)) {\n"
+ buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
- buf += " return PTR_ERR(fabric);\n"
+ buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@@ -467,9 +475,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
- buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
+ buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
- buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
+ buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
@@ -484,15 +492,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
- buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
+ buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
+ buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
+ buf += "#endif\n"
ret = p.write(buf)
if ret:
@@ -504,7 +514,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
def tcm_mod_scan_fabric_ops(tcm_dir):
- fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
+ fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
@@ -569,7 +579,11 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include \n"
buf += "#include \n\n"
buf += "#include \n"
- buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
+ buf += "#include \n"
buf += "#include \n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -774,7 +788,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
- buf += " if (!nacl) {\n"
+ buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
@@ -801,7 +815,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
- if re.search('\*release_cmd\)\(', fo):
+ if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
@@ -885,6 +899,13 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
+ if re.search('new_cmd_failure\)\(', fo):
+ buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
+ buf += "{\n"
+ buf += " return;\n"
+ buf += "}\n\n"
+ bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
+
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
@@ -927,6 +948,15 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
+ if re.search('pack_lun\)\(', fo):
+ buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
+ buf += "{\n"
+ buf += " WARN_ON(lun >= 256);\n"
+ buf += " /* Caller wants this byte-swapped */\n"
+ buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
+ buf += "}\n\n"
+ bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
+
ret = p.write(buf)
if ret:
diff --git a/trunk/Documentation/video4linux/v4l2-controls.txt b/trunk/Documentation/video4linux/v4l2-controls.txt
index e2492a9d1027..26aa0573933e 100644
--- a/trunk/Documentation/video4linux/v4l2-controls.txt
+++ b/trunk/Documentation/video4linux/v4l2-controls.txt
@@ -666,6 +666,27 @@ a control of this type whenever the first control belonging to a new control
class is added.
+Differences from the Spec
+=========================
+
+There are a few places where the framework acts slightly differently from the
+V4L2 Specification. Those differences are described in this section. We will
+have to see whether we need to adjust the spec or not.
+
+1) It is no longer required to have all controls contained in a
+v4l2_ext_control array be from the same control class. The framework will be
+able to handle any type of control in the array. You need to set ctrl_class
+to 0 in order to enable this. If ctrl_class is non-zero, then it will still
+check that all controls belong to that control class.
+
+If you set ctrl_class to 0 and count to 0, then it will only return an error
+if there are no controls at all.
+
+2) Clarified the way error_idx works. For get and set it will be equal to
+count if nothing was done yet. If it is less than count then only the controls
+up to error_idx-1 were successfully applied.
+
+
Proposals for Extensions
========================
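
For context, whether a given driver accepts ctrl_class set to 0 can be probed
from user space by calling VIDIOC_TRY_EXT_CTRLS with both ctrl_class and count
set to zero and checking the result. A minimal sketch; the device path is an
assumption.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_ext_controls ctrls;
            int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */

            if (fd < 0)
                    return 1;

            memset(&ctrls, 0, sizeof(ctrls));       /* ctrl_class = 0, count = 0 */
            if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) == 0)
                    printf("driver handles controls from any control class\n");
            else
                    printf("all controls in one call must share a control class\n");
            return 0;
    }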
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 89b70df91f4f..2a90101309d1 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -745,7 +745,6 @@ M: Barry Song
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-prima2/
-F: drivers/dma/sirf-dma*
ARM/EBSA110 MACHINE SUPPORT
M: Russell King
@@ -1412,7 +1411,6 @@ F: net/ax25/
B43 WIRELESS DRIVER
M: Stefano Brivio
L: linux-wireless@vger.kernel.org
-L: b43-dev@lists.infradead.org (moderated for non-subscribers)
W: http://linuxwireless.org/en/users/Drivers/b43
S: Maintained
F: drivers/net/wireless/b43/
@@ -1589,13 +1587,6 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/bnx2fc/
-BROADCOM SPECIFIC AMBA DRIVER (BCMA)
-M: Rafał Miłecki
-L: linux-wireless@vger.kernel.org
-S: Maintained
-F: drivers/bcma/
-F: include/linux/bcma/
-
BROCADE BFA FC SCSI DRIVER
M: Jing Huang
L: linux-scsi@vger.kernel.org
@@ -5855,7 +5846,7 @@ F: drivers/mmc/host/sdhci-spear.c
SECURITY SUBSYSTEM
M: James Morris
L: linux-security-module@vger.kernel.org (suggested Cc:)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
W: http://security.wiki.kernel.org/
S: Supported
F: security/
@@ -6125,6 +6116,13 @@ S: Maintained
F: drivers/ssb/
F: include/linux/ssb/
+BROADCOM SPECIFIC AMBA DRIVER (BCMA)
+M: Rafał Miłecki
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/bcma/
+F: include/linux/bcma/
+
SONY VAIO CONTROL DEVICE DRIVER
M: Mattia Dongili
L: platform-driver-x86@vger.kernel.org
@@ -7200,7 +7198,7 @@ S: Maintained
F: drivers/net/vmxnet3/
VMware PVSCSI driver
-M: Arvind Kumar
+M: Alok Kataria
M: VMware PV-Drivers
L: linux-scsi@vger.kernel.org
S: Maintained
diff --git a/trunk/arch/arm/include/asm/kprobes.h b/trunk/arch/arm/include/asm/kprobes.h
index f82ec22eeb11..feec86768f9c 100644
--- a/trunk/arch/arm/include/asm/kprobes.h
+++ b/trunk/arch/arm/include/asm/kprobes.h
@@ -24,6 +24,7 @@
#define MAX_INSN_SIZE 2
#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
+#define regs_return_value(regs) ((regs)->ARM_r0)
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
diff --git a/trunk/arch/arm/include/asm/ptrace.h b/trunk/arch/arm/include/asm/ptrace.h
index 451808ba1211..96187ff58c24 100644
--- a/trunk/arch/arm/include/asm/ptrace.h
+++ b/trunk/arch/arm/include/asm/ptrace.h
@@ -189,11 +189,6 @@ static inline int valid_user_regs(struct pt_regs *regs)
return 0;
}
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->ARM_r0;
-}
-
#define instruction_pointer(regs) (regs)->ARM_pc
#ifdef CONFIG_SMP
diff --git a/trunk/arch/arm/include/asm/thread_info.h b/trunk/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..0f30c3a78fc1 100644
--- a/trunk/arch/arm/include/asm/thread_info.h
+++ b/trunk/arch/arm/include/asm/thread_info.h
@@ -129,7 +129,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCAL_AUDIT - syscall auditing active
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
@@ -140,7 +139,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -151,15 +149,11 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
-
/*
* Change these and you break ASM code in entry-common.S
*/
diff --git a/trunk/arch/arm/include/asm/unified.h b/trunk/arch/arm/include/asm/unified.h
index f5989f46b4d2..bc631161e9c6 100644
--- a/trunk/arch/arm/include/asm/unified.h
+++ b/trunk/arch/arm/include/asm/unified.h
@@ -37,8 +37,8 @@
#define THUMB(x...) x
#ifdef __ASSEMBLY__
#define W(instr) instr.w
-#define BSYM(sym) sym + 1
#endif
+#define BSYM(sym) sym + 1
#else /* !CONFIG_THUMB2_KERNEL */
@@ -49,8 +49,8 @@
#define THUMB(x...)
#ifdef __ASSEMBLY__
#define W(instr) instr
-#define BSYM(sym) sym
#endif
+#define BSYM(sym) sym
#endif /* CONFIG_THUMB2_KERNEL */
diff --git a/trunk/arch/arm/kernel/entry-common.S b/trunk/arch/arm/kernel/entry-common.S
index 520889cf1b5b..b2a27b6b0046 100644
--- a/trunk/arch/arm/kernel/entry-common.S
+++ b/trunk/arch/arm/kernel/entry-common.S
@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
get_thread_info tsk
ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
- tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
@@ -443,7 +443,7 @@ ENTRY(vector_swi)
1:
#endif
- tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
diff --git a/trunk/arch/arm/kernel/ptrace.c b/trunk/arch/arm/kernel/ptrace.c
index e1d5e1929fbd..483727ad6892 100644
--- a/trunk/arch/arm/kernel/ptrace.c
+++ b/trunk/arch/arm/kernel/ptrace.c
@@ -906,6 +906,11 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+ return scno;
+
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
@@ -913,17 +918,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
ip = regs->ARM_ip;
regs->ARM_ip = why;
- if (!ip)
- audit_syscall_exit(regs);
- else
- audit_syscall_entry(AUDIT_ARCH_ARMEB, scno, regs->ARM_r0,
- regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
-
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return scno;
- if (!(current->ptrace & PT_PTRACED))
- return scno;
-
current_thread_info()->syscall = scno;
/* the 0x80 provides a way for the tracing parent to distinguish
diff --git a/trunk/arch/arm/mach-ep93xx/include/mach/dma.h b/trunk/arch/arm/mach-ep93xx/include/mach/dma.h
index e82c642fa53c..46d4d876e6fb 100644
--- a/trunk/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/trunk/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -37,7 +37,7 @@
*/
struct ep93xx_dma_data {
int port;
- enum dma_transfer_direction direction;
+ enum dma_data_direction direction;
const char *name;
};
@@ -80,14 +80,14 @@ static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
* channel supports given DMA direction. Only M2P channels have such
* limitation, for M2M channels the direction is configurable.
*/
-static inline enum dma_transfer_direction
+static inline enum dma_data_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
return DMA_NONE;
/* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
#endif /* __ASM_ARCH_DMA_H */
diff --git a/trunk/arch/arm/mach-exynos/headsmp.S b/trunk/arch/arm/mach-exynos/headsmp.S
index 5364d4bfa8bc..3cdeb3647542 100644
--- a/trunk/arch/arm/mach-exynos/headsmp.S
+++ b/trunk/arch/arm/mach-exynos/headsmp.S
@@ -36,8 +36,6 @@ pen: ldr r7, [r6]
* should now contain the SVC stack for this core
*/
b secondary_startup
-ENDPROC(exynos4_secondary_startup)
- .align 2
1: .long .
.long pen_release
diff --git a/trunk/arch/arm/mach-exynos/mach-origen.c b/trunk/arch/arm/mach-exynos/mach-origen.c
index 0679b8ad2d1e..2b11e046d391 100644
--- a/trunk/arch/arm/mach-exynos/mach-origen.c
+++ b/trunk/arch/arm/mach-exynos/mach-origen.c
@@ -597,8 +597,7 @@ static struct s3c_fb_pd_win origen_fb_win0 = {
static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
.win[0] = &origen_fb_win0,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
- .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
- VIDCON1_INV_VCLK,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
.setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
};
diff --git a/trunk/arch/arm/mach-exynos/platsmp.c b/trunk/arch/arm/mach-exynos/platsmp.c
index 683aec786b78..60bc45e3e709 100644
--- a/trunk/arch/arm/mach-exynos/platsmp.c
+++ b/trunk/arch/arm/mach-exynos/platsmp.c
@@ -24,6 +24,7 @@
#include
#include
#include
+#include
#include
#include
@@ -136,7 +137,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
while (time_before(jiffies, timeout)) {
smp_rmb();
- __raw_writel(virt_to_phys(exynos4_secondary_startup),
+ __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
CPU1_BOOT_REG);
gic_raise_softirq(cpumask_of(cpu), 1);
@@ -191,6 +192,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(virt_to_phys(exynos4_secondary_startup),
+ __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
CPU1_BOOT_REG);
}
diff --git a/trunk/arch/arm/mach-highbank/highbank.c b/trunk/arch/arm/mach-highbank/highbank.c
index 7afbe1e55beb..804c4a55f803 100644
--- a/trunk/arch/arm/mach-highbank/highbank.c
+++ b/trunk/arch/arm/mach-highbank/highbank.c
@@ -25,6 +25,7 @@
#include
#include
+#include
#include
#include
#include
@@ -75,7 +76,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)
#ifdef CONFIG_SMP
cpu = cpu_logical_map(cpu);
#endif
- writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
+ writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
HB_JUMP_TABLE_PHYS(cpu) + 15);
diff --git a/trunk/arch/arm/mach-imx/src.c b/trunk/arch/arm/mach-imx/src.c
index 29bd1243781e..4bde04f99e38 100644
--- a/trunk/arch/arm/mach-imx/src.c
+++ b/trunk/arch/arm/mach-imx/src.c
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#define SRC_SCR 0x000
#define SRC_GPR1 0x020
@@ -42,7 +43,7 @@ void imx_enable_cpu(int cpu, bool enable)
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
cpu = cpu_logical_map(cpu);
- writel_relaxed(virt_to_phys(jump_addr),
+ writel_relaxed(BSYM(virt_to_phys(jump_addr)),
src_base + SRC_GPR1 + cpu * 8);
}
diff --git a/trunk/arch/arm/mach-msm/headsmp.S b/trunk/arch/arm/mach-msm/headsmp.S
index bcd5af223dea..0c631a9f8647 100644
--- a/trunk/arch/arm/mach-msm/headsmp.S
+++ b/trunk/arch/arm/mach-msm/headsmp.S
@@ -34,7 +34,6 @@ pen: ldr r7, [r6]
* should now contain the SVC stack for this core
*/
b secondary_startup
-ENDPROC(msm_secondary_startup)
.align
1: .long .
diff --git a/trunk/arch/arm/mach-msm/vreg.c b/trunk/arch/arm/mach-msm/vreg.c
index bd66ed04d6dc..a9103bc6615f 100644
--- a/trunk/arch/arm/mach-msm/vreg.c
+++ b/trunk/arch/arm/mach-msm/vreg.c
@@ -19,7 +19,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/arm/mach-picoxcell/time.c b/trunk/arch/arm/mach-picoxcell/time.c
index 2ecba6743b8e..6c89cf8ab22e 100644
--- a/trunk/arch/arm/mach-picoxcell/time.c
+++ b/trunk/arch/arm/mach-picoxcell/time.c
@@ -67,7 +67,7 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
static void __iomem *sched_io_base;
-static u32 picoxcell_read_sched_clock(void)
+unsigned u32 notrace picoxcell_read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
diff --git a/trunk/arch/arm/mach-realview/platsmp.c b/trunk/arch/arm/mach-realview/platsmp.c
index 17c878ddbc70..e83c654a58d0 100644
--- a/trunk/arch/arm/mach-realview/platsmp.c
+++ b/trunk/arch/arm/mach-realview/platsmp.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include
#include
@@ -74,6 +75,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(virt_to_phys(versatile_secondary_startup),
+ __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)),
__io_address(REALVIEW_SYS_FLAGSSET));
}
diff --git a/trunk/arch/arm/mach-s3c64xx/include/mach/crag6410.h b/trunk/arch/arm/mach-s3c64xx/include/mach/crag6410.h
index 4cb2f951f1e9..5d55ab018b6b 100644
--- a/trunk/arch/arm/mach-s3c64xx/include/mach/crag6410.h
+++ b/trunk/arch/arm/mach-s3c64xx/include/mach/crag6410.h
@@ -21,6 +21,5 @@
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
#define GLENFARCLAS_PMIC_GPIO_BASE (GPIO_BOARD_START + 32)
#define BANFF_PMIC_GPIO_BASE (GPIO_BOARD_START + 64)
-#define MMGPIO_GPIO_BASE (GPIO_BOARD_START + 96)
#endif
diff --git a/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c b/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c
index 8077f650eb0e..1cc91d794c97 100644
--- a/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/trunk/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -260,7 +260,6 @@ static struct platform_device crag6410_dm9k_device = {
static struct resource crag6410_mmgpio_resource[] = {
[0] = {
- .name = "dat",
.start = S3C64XX_PA_XM0CSN4 + 1,
.end = S3C64XX_PA_XM0CSN4 + 1,
.flags = IORESOURCE_MEM,
@@ -273,7 +272,7 @@ static struct platform_device crag6410_mmgpio = {
.resource = crag6410_mmgpio_resource,
.num_resources = ARRAY_SIZE(crag6410_mmgpio_resource),
.dev.platform_data = &(struct bgpio_pdata) {
- .base = MMGPIO_GPIO_BASE,
+ .base = -1,
},
};
@@ -329,6 +328,7 @@ static struct platform_device wallvdd_device = {
static struct platform_device *crag6410_devices[] __initdata = {
&s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
&s3c_device_hsmmc2,
&s3c_device_i2c0,
&s3c_device_i2c1,
@@ -355,7 +355,7 @@ static struct platform_device *crag6410_devices[] __initdata = {
static struct pca953x_platform_data crag6410_pca_data = {
.gpio_base = PCA935X_GPIO_BASE,
- .irq_base = -1,
+ .irq_base = 0,
};
/* VDDARM is controlled by DVS1 connected to GPK(0) */
@@ -683,6 +683,12 @@ static struct s3c_sdhci_platdata crag6410_hsmmc2_pdata = {
.cd_type = S3C_SDHCI_CD_PERMANENT,
};
+static struct s3c_sdhci_platdata crag6410_hsmmc1_pdata = {
+ .max_width = 4,
+ .cd_type = S3C_SDHCI_CD_GPIO,
+ .ext_cd_gpio = S3C64XX_GPF(11),
+};
+
static void crag6410_cfg_sdhci0(struct platform_device *dev, int width)
{
/* Set all the necessary GPG pins to special-function 2 */
@@ -717,6 +723,7 @@ static void __init crag6410_machine_init(void)
gpio_direction_output(S3C64XX_GPF(10), 1);
s3c_sdhci0_set_platdata(&crag6410_hsmmc0_pdata);
+ s3c_sdhci1_set_platdata(&crag6410_hsmmc1_pdata);
s3c_sdhci2_set_platdata(&crag6410_hsmmc2_pdata);
s3c_i2c0_set_platdata(&i2c0_pdata);
diff --git a/trunk/arch/arm/mach-s3c64xx/pm.c b/trunk/arch/arm/mach-s3c64xx/pm.c
index 7d3e81b9dd06..055dac90e0e2 100644
--- a/trunk/arch/arm/mach-s3c64xx/pm.c
+++ b/trunk/arch/arm/mach-s3c64xx/pm.c
@@ -346,10 +346,23 @@ int __init s3c64xx_pm_init(void)
static __init int s3c64xx_pm_initcall(void)
{
+ u32 val;
+
pm_cpu_prep = s3c64xx_pm_prepare;
pm_cpu_sleep = s3c64xx_cpu_suspend;
pm_uart_udivslot = 1;
+ /*
+ * Unconditionally disable power domains that contain only
+ * blocks which have no mainline driver support.
+ */
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val &= ~(S3C64XX_NORMALCFG_DOMAIN_G_ON |
+ S3C64XX_NORMALCFG_DOMAIN_V_ON |
+ S3C64XX_NORMALCFG_DOMAIN_I_ON |
+ S3C64XX_NORMALCFG_DOMAIN_P_ON);
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
diff --git a/trunk/arch/arm/mach-shmobile/setup-sh7372.c b/trunk/arch/arm/mach-shmobile/setup-sh7372.c
index 6fcf304d3cdf..1ea89be63e29 100644
--- a/trunk/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/trunk/arch/arm/mach-shmobile/setup-sh7372.c
@@ -445,39 +445,31 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
},
};
-#define SH7372_CHCLR 0x220
-
static const struct sh_dmae_channel sh7372_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x10,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0x20,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x30,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
- .chclr_offset = SH7372_CHCLR + 0x50,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
- .chclr_offset = SH7372_CHCLR + 0x60,
}
};
@@ -495,7 +487,6 @@ static struct sh_dmae_pdata dma_platform_data = {
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_DME,
- .chclr_present = 1,
};
/* Resource order important! */
@@ -503,7 +494,7 @@ static struct resource sh7372_dmae0_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe008020,
- .end = 0xfe00828f,
+ .end = 0xfe00808f,
.flags = IORESOURCE_MEM,
},
{
@@ -531,7 +522,7 @@ static struct resource sh7372_dmae1_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe018020,
- .end = 0xfe01828f,
+ .end = 0xfe01808f,
.flags = IORESOURCE_MEM,
},
{
@@ -559,7 +550,7 @@ static struct resource sh7372_dmae2_resources[] = {
{
/* Channel registers and DMAOR */
.start = 0xfe028020,
- .end = 0xfe02828f,
+ .end = 0xfe02808f,
.flags = IORESOURCE_MEM,
},
{
diff --git a/trunk/arch/arm/mach-ux500/headsmp.S b/trunk/arch/arm/mach-ux500/headsmp.S
index 08da5589bcd8..64fa451edcfd 100644
--- a/trunk/arch/arm/mach-ux500/headsmp.S
+++ b/trunk/arch/arm/mach-ux500/headsmp.S
@@ -32,8 +32,6 @@ pen: ldr r7, [r6]
* should now contain the SVC stack for this core
*/
b secondary_startup
-ENDPROC(u8500_secondary_startup)
- .align 2
1: .long .
.long pen_release
diff --git a/trunk/arch/arm/mach-vexpress/platsmp.c b/trunk/arch/arm/mach-vexpress/platsmp.c
index 124ffb169093..2b5f7ac001a3 100644
--- a/trunk/arch/arm/mach-vexpress/platsmp.c
+++ b/trunk/arch/arm/mach-vexpress/platsmp.c
@@ -13,6 +13,8 @@
#include
#include
+#include
+
#include
#define V2M_PA_CS7 0x10000000
@@ -44,6 +46,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* secondary CPU branches to this address.
*/
writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
- writel(virt_to_phys(versatile_secondary_startup),
+ writel(BSYM(virt_to_phys(versatile_secondary_startup)),
MMIO_P2V(V2M_SYS_FLAGSSET));
}
diff --git a/trunk/arch/arm/plat-mxc/include/mach/mx3fb.h b/trunk/arch/arm/plat-mxc/include/mach/mx3fb.h
index fdbe60001542..ac24c5c4bc83 100644
--- a/trunk/arch/arm/plat-mxc/include/mach/mx3fb.h
+++ b/trunk/arch/arm/plat-mxc/include/mach/mx3fb.h
@@ -22,20 +22,6 @@
#define FB_SYNC_SWAP_RGB 0x04000000
#define FB_SYNC_CLK_SEL_EN 0x02000000
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
/**
* struct mx3fb_platform_data - mx3fb platform data
*
@@ -47,7 +33,6 @@ struct mx3fb_platform_data {
const char *name;
const struct fb_videomode *mode;
int num_modes;
- enum disp_data_mapping disp_data_fmt;
};
#endif
diff --git a/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index fd0ee84c45d1..685c78716d95 100644
--- a/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/trunk/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -113,8 +113,7 @@ struct stedma40_half_channel_info {
* @dst_dev_type: Dst device type
* @src_info: Parameters for dst half channel
* @dst_info: Parameters for dst half channel
- * @use_fixed_channel: if true, use physical channel specified by phy_channel
- * @phy_channel: physical channel to use, only if use_fixed_channel is true
+ *
*
* This structure has to be filled by the client drivers.
* It is recommended to do all dma configurations for clients in the machine.
@@ -130,9 +129,6 @@ struct stedma40_chan_cfg {
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
-
- bool use_fixed_channel;
- int phy_channel;
};
/**
@@ -157,7 +153,6 @@ struct stedma40_platform_data {
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
int disabled_channels[STEDMA40_MAX_PHYS];
- bool use_esram_lcla;
};
#ifdef CONFIG_STE_DMA40
@@ -192,7 +187,7 @@ static inline struct
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
dma_addr_t addr,
unsigned int size,
- enum dma_transfer_direction direction,
+ enum dma_data_direction direction,
unsigned long flags)
{
struct scatterlist sg;
@@ -214,7 +209,7 @@ static inline struct
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
dma_addr_t addr,
unsigned int size,
- enum dma_transfer_direction direction,
+ enum dma_data_direction direction,
unsigned long flags)
{
return NULL;
diff --git a/trunk/arch/arm/plat-samsung/dma-ops.c b/trunk/arch/arm/plat-samsung/dma-ops.c
index 0747c77a2fd5..2cded872f22b 100644
--- a/trunk/arch/arm/plat-samsung/dma-ops.c
+++ b/trunk/arch/arm/plat-samsung/dma-ops.c
@@ -37,14 +37,14 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
(void *)dma_ch;
chan = dma_request_channel(mask, pl330_filter, filter_param);
- if (info->direction == DMA_DEV_TO_MEM) {
+ if (info->direction == DMA_FROM_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.src_addr = info->fifo;
slave_config.src_addr_width = info->width;
slave_config.src_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
- } else if (info->direction == DMA_MEM_TO_DEV) {
+ } else if (info->direction == DMA_TO_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.dst_addr = info->fifo;
diff --git a/trunk/arch/arm/plat-samsung/include/plat/dma-ops.h b/trunk/arch/arm/plat-samsung/include/plat/dma-ops.h
index 71a6827c7706..22eafc310bd7 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -14,11 +14,10 @@
#define __SAMSUNG_DMA_OPS_H_ __FILE__
#include
-#include
struct samsung_dma_prep_info {
enum dma_transaction_type cap;
- enum dma_transfer_direction direction;
+ enum dma_data_direction direction;
dma_addr_t buf;
unsigned long period;
unsigned long len;
@@ -28,7 +27,7 @@ struct samsung_dma_prep_info {
struct samsung_dma_info {
enum dma_transaction_type cap;
- enum dma_transfer_direction direction;
+ enum dma_data_direction direction;
enum dma_slave_buswidth width;
dma_addr_t fifo;
struct s3c2410_dma_client *client;
diff --git a/trunk/arch/arm/plat-samsung/include/plat/dma.h b/trunk/arch/arm/plat-samsung/include/plat/dma.h
index 7b02143ccd9a..b9061128abde 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/dma.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/dma.h
@@ -10,9 +10,6 @@
* published by the Free Software Foundation.
*/
-#ifndef __PLAT_DMA_H
-#define __PLAT_DMA_H
-
#include
enum s3c2410_dma_buffresult {
@@ -125,6 +122,5 @@ extern int s3c2410_dma_getposition(enum dma_ch channel,
extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
-#include
-#endif
+#include
diff --git a/trunk/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/trunk/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index fa95e9a00972..aea68b60ef98 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -11,8 +11,6 @@
#ifndef __S3C64XX_PLAT_SPI_H
#define __S3C64XX_PLAT_SPI_H
-struct platform_device;
-
/**
* struct s3c64xx_spi_csinfo - ChipSelect description
* @fb_delay: Slave specific feedback delay.
diff --git a/trunk/arch/arm/plat-versatile/headsmp.S b/trunk/arch/arm/plat-versatile/headsmp.S
index dd703ef09b8d..d397a1fb2f54 100644
--- a/trunk/arch/arm/plat-versatile/headsmp.S
+++ b/trunk/arch/arm/plat-versatile/headsmp.S
@@ -38,4 +38,3 @@ pen: ldr r7, [r6]
.align
1: .long .
.long pen_release
-ENDPROC(versatile_secondary_startup)
diff --git a/trunk/arch/ia64/include/asm/ptrace.h b/trunk/arch/ia64/include/asm/ptrace.h
index 68c98f5b3ca6..f5cb27614e35 100644
--- a/trunk/arch/ia64/include/asm/ptrace.h
+++ b/trunk/arch/ia64/include/asm/ptrace.h
@@ -246,18 +246,7 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
return regs->ar_bspstore;
}
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return regs->r10 != -1;
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- if (is_syscall_success(regs))
- return regs->r8;
- else
- return -regs->r8;
-}
+#define regs_return_value(regs) ((regs)->r8)
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c
index 5207035dc061..bfb4d01e0e51 100644
--- a/trunk/arch/ia64/kernel/acpi.c
+++ b/trunk/arch/ia64/kernel/acpi.c
@@ -429,24 +429,22 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
static struct acpi_table_slit __initdata *slit_table;
cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-static int __init
-get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
pxm = pa->proximity_domain_lo;
- if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
+ if (ia64_platform_is("sn2"))
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
-static int __init
-get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;
pxm = ma->proximity_domain;
- if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
+ if (!ia64_platform_is("sn2"))
pxm &= 0xff;
return pxm;
diff --git a/trunk/arch/ia64/kernel/ptrace.c b/trunk/arch/ia64/kernel/ptrace.c
index dad91661ddf9..8848f43d819e 100644
--- a/trunk/arch/ia64/kernel/ptrace.c
+++ b/trunk/arch/ia64/kernel/ptrace.c
@@ -1246,8 +1246,15 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
if (test_thread_flag(TIF_RESTORE_RSE))
ia64_sync_krbs();
+ if (unlikely(current->audit_context)) {
+ long syscall;
+ int arch;
- audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
+ syscall = regs.r15;
+ arch = AUDIT_ARCH_IA64;
+
+ audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
+ }
return 0;
}
@@ -1261,7 +1268,14 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
{
int step;
- audit_syscall_exit(&regs);
+ if (unlikely(current->audit_context)) {
+ int success = AUDITSC_RESULT(regs.r10);
+ long result = regs.r8;
+
+ if (success != AUDITSC_SUCCESS)
+ result = -result;
+ audit_syscall_exit(success, result);
+ }
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
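The ia64 exit hook above rebuilds the two-argument audit_syscall_exit() call from the register convention the removed regs_return_value() helper used to hide: r10 == -1 marks a failed syscall, and r8 then holds the error code. The sketch below is illustration only; the AUDITSC_* values, the AUDITSC_RESULT() definition and the extern declaration are assumptions, not taken from this patch.

/* Illustration only; the AUDITSC_* values and AUDITSC_RESULT() definition
 * are assumed, not quoted from the kernel audit header. */
#define AUDITSC_SUCCESS 1
#define AUDITSC_FAILURE 2
#define AUDITSC_RESULT(x) (((long)(x)) < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS)

extern void audit_syscall_exit(int success, long return_code);

/* ia64 convention, per the removed is_syscall_success() earlier in this
 * patch: r10 == -1 on failure, r8 holds the return value or errno. */
static void ia64_audit_exit_sketch(long r8, long r10)
{
	int success = AUDITSC_RESULT(r10);	/* r10 == -1 -> FAILURE, 0 -> SUCCESS */
	long result = (success == AUDITSC_SUCCESS) ? r8 : -r8;

	audit_syscall_exit(success, result);
}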
diff --git a/trunk/arch/microblaze/include/asm/ptrace.h b/trunk/arch/microblaze/include/asm/ptrace.h
index 94e92c805859..816bee64b196 100644
--- a/trunk/arch/microblaze/include/asm/ptrace.h
+++ b/trunk/arch/microblaze/include/asm/ptrace.h
@@ -61,11 +61,6 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->r3;
-}
-
#else /* __KERNEL__ */
/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
diff --git a/trunk/arch/microblaze/kernel/ptrace.c b/trunk/arch/microblaze/kernel/ptrace.c
index 6eb2aa927d89..043cb58f9c44 100644
--- a/trunk/arch/microblaze/kernel/ptrace.c
+++ b/trunk/arch/microblaze/kernel/ptrace.c
@@ -147,8 +147,10 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
*/
ret = -1L;
- audit_syscall_entry(EM_MICROBLAZE, regs->r12, regs->r5, regs->r6,
- regs->r7, regs->r8);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(EM_MICROBLAZE, regs->r12,
+ regs->r5, regs->r6,
+ regs->r7, regs->r8);
return ret ?: regs->r12;
}
@@ -157,7 +159,8 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->r3), regs->r3);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c
index d4fc1a971779..604cd9dd1333 100644
--- a/trunk/arch/microblaze/kernel/setup.c
+++ b/trunk/arch/microblaze/kernel/setup.c
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -227,23 +226,5 @@ static int __init setup_bus_notifier(void)
return 0;
}
-arch_initcall(setup_bus_notifier);
-
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
-static int __init topology_init(void)
-{
- int i, ret;
-
- for_each_present_cpu(i) {
- struct cpu *c = &per_cpu(cpu_devices, i);
- ret = register_cpu(c, i);
- if (ret)
- printk(KERN_WARNING "topology_init: register_cpu %d "
- "failed (%d)\n", i, ret);
- }
-
- return 0;
-}
-subsys_initcall(topology_init);
+arch_initcall(setup_bus_notifier);
diff --git a/trunk/arch/mips/include/asm/ptrace.h b/trunk/arch/mips/include/asm/ptrace.h
index 4b7f5252d2fd..7b99c670e478 100644
--- a/trunk/arch/mips/include/asm/ptrace.h
+++ b/trunk/arch/mips/include/asm/ptrace.h
@@ -137,19 +137,7 @@ extern int ptrace_set_watch_regs(struct task_struct *child,
*/
#define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER)
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return !regs->regs[7];
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- if (is_syscall_success(regs))
- return regs->regs[2];
- else
- return -regs->regs[2];
-}
-
+#define regs_return_value(_regs) ((_regs)->regs[2])
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
diff --git a/trunk/arch/mips/kernel/ptrace.c b/trunk/arch/mips/kernel/ptrace.c
index 7786b608d932..4e6ea1ffad46 100644
--- a/trunk/arch/mips/kernel/ptrace.c
+++ b/trunk/arch/mips/kernel/ptrace.c
@@ -560,9 +560,10 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
}
out:
- audit_syscall_entry(audit_arch(), regs->regs[2],
- regs->regs[4], regs->regs[5],
- regs->regs[6], regs->regs[7]);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(audit_arch(), regs->regs[2],
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
}
/*
@@ -571,7 +572,9 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
*/
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
+ -regs->regs[2]);
if (!(current->ptrace & PT_PTRACED))
return;
diff --git a/trunk/arch/powerpc/include/asm/ptrace.h b/trunk/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..48223f9b8728 100644
--- a/trunk/arch/powerpc/include/asm/ptrace.h
+++ b/trunk/arch/powerpc/include/asm/ptrace.h
@@ -86,18 +86,7 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->nip)
#define user_stack_pointer(regs) ((regs)->gpr[1])
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return !(regs->ccr & 0x10000000);
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- if (is_syscall_success(regs))
- return regs->gpr[3];
- else
- return -regs->gpr[3];
-}
+#define regs_return_value(regs) ((regs)->gpr[3])
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c
index 5b43325402bc..5de73dbd15c7 100644
--- a/trunk/arch/powerpc/kernel/ptrace.c
+++ b/trunk/arch/powerpc/kernel/ptrace.c
@@ -1724,20 +1724,22 @@ long do_syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
+ if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
- if (!is_32bit_task())
- audit_syscall_entry(AUDIT_ARCH_PPC64,
- regs->gpr[0],
- regs->gpr[3], regs->gpr[4],
- regs->gpr[5], regs->gpr[6]);
- else
+ if (!is_32bit_task())
+ audit_syscall_entry(AUDIT_ARCH_PPC64,
+ regs->gpr[0],
+ regs->gpr[3], regs->gpr[4],
+ regs->gpr[5], regs->gpr[6]);
+ else
#endif
- audit_syscall_entry(AUDIT_ARCH_PPC,
- regs->gpr[0],
- regs->gpr[3] & 0xffffffff,
- regs->gpr[4] & 0xffffffff,
- regs->gpr[5] & 0xffffffff,
- regs->gpr[6] & 0xffffffff);
+ audit_syscall_entry(AUDIT_ARCH_PPC,
+ regs->gpr[0],
+ regs->gpr[3] & 0xffffffff,
+ regs->gpr[4] & 0xffffffff,
+ regs->gpr[5] & 0xffffffff,
+ regs->gpr[6] & 0xffffffff);
+ }
return ret ?: regs->gpr[0];
}
@@ -1746,7 +1748,9 @@ void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
+ regs->result);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
diff --git a/trunk/arch/s390/include/asm/ptrace.h b/trunk/arch/s390/include/asm/ptrace.h
index aeb77f017985..56da355678f4 100644
--- a/trunk/arch/s390/include/asm/ptrace.h
+++ b/trunk/arch/s390/include/asm/ptrace.h
@@ -541,13 +541,9 @@ struct user_regs_struct
#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
#define user_stack_pointer(regs)((regs)->gprs[15])
+#define regs_return_value(regs)((regs)->gprs[2])
#define profile_pc(regs) instruction_pointer(regs)
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->gprs[2];
-}
-
int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
diff --git a/trunk/arch/s390/kernel/ptrace.c b/trunk/arch/s390/kernel/ptrace.c
index 9d82ed4bcb27..573bc29551ef 100644
--- a/trunk/arch/s390/kernel/ptrace.c
+++ b/trunk/arch/s390/kernel/ptrace.c
@@ -740,17 +740,20 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gprs[2]);
- audit_syscall_entry(is_compat_task() ?
- AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
- regs->gprs[2], regs->orig_gpr2,
- regs->gprs[3], regs->gprs[4],
- regs->gprs[5]);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(is_compat_task() ?
+ AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
+ regs->gprs[2], regs->orig_gpr2,
+ regs->gprs[3], regs->gprs[4],
+ regs->gprs[5]);
return ret ?: regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
+ regs->gprs[2]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->gprs[2]);
diff --git a/trunk/arch/sh/include/asm/ptrace_32.h b/trunk/arch/sh/include/asm/ptrace_32.h
index 2d3e906aa722..6c2239cca1a2 100644
--- a/trunk/arch/sh/include/asm/ptrace_32.h
+++ b/trunk/arch/sh/include/asm/ptrace_32.h
@@ -76,10 +76,7 @@ struct pt_dspregs {
#ifdef __KERNEL__
#define MAX_REG_OFFSET offsetof(struct pt_regs, tra)
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->regs[0];
-}
+#define regs_return_value(_regs) ((_regs)->regs[0])
#endif /* __KERNEL__ */
diff --git a/trunk/arch/sh/include/asm/ptrace_64.h b/trunk/arch/sh/include/asm/ptrace_64.h
index eb3fcceaf64b..bf9be7764d69 100644
--- a/trunk/arch/sh/include/asm/ptrace_64.h
+++ b/trunk/arch/sh/include/asm/ptrace_64.h
@@ -13,10 +13,7 @@ struct pt_regs {
#ifdef __KERNEL__
#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->regs[3];
-}
+#define regs_return_value(_regs) ((_regs)->regs[3])
#endif /* __KERNEL__ */
diff --git a/trunk/arch/sh/kernel/ptrace_32.c b/trunk/arch/sh/kernel/ptrace_32.c
index a3e651563763..92b3c276339a 100644
--- a/trunk/arch/sh/kernel/ptrace_32.c
+++ b/trunk/arch/sh/kernel/ptrace_32.c
@@ -518,9 +518,10 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
- audit_syscall_entry(audit_arch(), regs->regs[3],
- regs->regs[4], regs->regs[5],
- regs->regs[6], regs->regs[7]);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(audit_arch(), regs->regs[3],
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
return ret ?: regs->regs[0];
}
@@ -529,7 +530,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
+ regs->regs[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
diff --git a/trunk/arch/sh/kernel/ptrace_64.c b/trunk/arch/sh/kernel/ptrace_64.c
index 3d0080b5c976..c8f97649f354 100644
--- a/trunk/arch/sh/kernel/ptrace_64.c
+++ b/trunk/arch/sh/kernel/ptrace_64.c
@@ -536,9 +536,10 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[9]);
- audit_syscall_entry(audit_arch(), regs->regs[1],
- regs->regs[2], regs->regs[3],
- regs->regs[4], regs->regs[5]);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(audit_arch(), regs->regs[1],
+ regs->regs[2], regs->regs[3],
+ regs->regs[4], regs->regs[5]);
return ret ?: regs->regs[9];
}
@@ -547,7 +548,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
+ regs->regs[9]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
diff --git a/trunk/arch/sparc/include/asm/ptrace.h b/trunk/arch/sparc/include/asm/ptrace.h
index c00c3b5c2806..a0e1bcf843a1 100644
--- a/trunk/arch/sparc/include/asm/ptrace.h
+++ b/trunk/arch/sparc/include/asm/ptrace.h
@@ -207,15 +207,7 @@ do { current_thread_info()->syscall_noerror = 1; \
#define instruction_pointer(regs) ((regs)->tpc)
#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
-static inline int is_syscall_success(struct pt_regs *regs)
-{
- return !(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY));
-}
-
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->u_regs[UREG_I0];
-}
+#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *);
#else
diff --git a/trunk/arch/sparc/kernel/ptrace_64.c b/trunk/arch/sparc/kernel/ptrace_64.c
index 9388844cd88c..96ee50a80661 100644
--- a/trunk/arch/sparc/kernel/ptrace_64.c
+++ b/trunk/arch/sparc/kernel/ptrace_64.c
@@ -1071,22 +1071,32 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->u_regs[UREG_G1]);
- audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
- AUDIT_ARCH_SPARC :
- AUDIT_ARCH_SPARC64),
- regs->u_regs[UREG_G1],
- regs->u_regs[UREG_I0],
- regs->u_regs[UREG_I1],
- regs->u_regs[UREG_I2],
- regs->u_regs[UREG_I3]);
+ if (unlikely(current->audit_context) && !ret)
+ audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
+ AUDIT_ARCH_SPARC :
+ AUDIT_ARCH_SPARC64),
+ regs->u_regs[UREG_G1],
+ regs->u_regs[UREG_I0],
+ regs->u_regs[UREG_I1],
+ regs->u_regs[UREG_I2],
+ regs->u_regs[UREG_I3]);
return ret;
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
- audit_syscall_exit(regs);
+#ifdef CONFIG_AUDITSYSCALL
+ if (unlikely(current->audit_context)) {
+ unsigned long tstate = regs->tstate;
+ int result = AUDITSC_SUCCESS;
+ if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
+ result = AUDITSC_FAILURE;
+
+ audit_syscall_exit(result, regs->u_regs[UREG_I0]);
+ }
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
diff --git a/trunk/arch/um/kernel/ptrace.c b/trunk/arch/um/kernel/ptrace.c
index 06b190390505..c9da32b0c707 100644
--- a/trunk/arch/um/kernel/ptrace.c
+++ b/trunk/arch/um/kernel/ptrace.c
@@ -167,15 +167,17 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
int tracesysgood;
- if (!entryexit)
- audit_syscall_entry(HOST_AUDIT_ARCH,
- UPT_SYSCALL_NR(regs),
- UPT_SYSCALL_ARG1(regs),
- UPT_SYSCALL_ARG2(regs),
- UPT_SYSCALL_ARG3(regs),
- UPT_SYSCALL_ARG4(regs));
- else
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context)) {
+ if (!entryexit)
+ audit_syscall_entry(HOST_AUDIT_ARCH,
+ UPT_SYSCALL_NR(regs),
+ UPT_SYSCALL_ARG1(regs),
+ UPT_SYSCALL_ARG2(regs),
+ UPT_SYSCALL_ARG3(regs),
+ UPT_SYSCALL_ARG4(regs));
+ else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
+ UPT_SYSCALL_RET(regs));
+ }
/* Fake a debug trap */
if (is_singlestep)
diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S
index e3e734005e19..1106261856c8 100644
--- a/trunk/arch/x86/ia32/ia32entry.S
+++ b/trunk/arch/x86/ia32/ia32entry.S
@@ -14,7 +14,6 @@
#include
#include
#include
-#include
/* Avoid __ASSEMBLER__'ifying just for this. */
#include
@@ -190,7 +189,7 @@ sysexit_from_sys_call:
movl %ebx,%edx /* 3rd arg: 1st syscall arg */
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
- call __audit_syscall_entry
+ call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
@@ -207,13 +206,12 @@ sysexit_from_sys_call:
TRACE_IRQS_ON
sti
movl %eax,%esi /* second arg, syscall return value */
- cmpl $-MAX_ERRNO,%eax /* is it an error ? */
- jbe 1f
- movslq %eax, %rsi /* if error sign extend to 64 bits */
-1: setbe %al /* 1 if error, 0 if not */
+ cmpl $0,%eax /* is it < 0? */
+ setl %al /* 1 if so, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
- call __audit_syscall_exit
- movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
+ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
cli
TRACE_IRQS_OFF
diff --git a/trunk/arch/x86/kernel/e820.c b/trunk/arch/x86/kernel/e820.c
index 62d61e9976eb..174d938d576b 100644
--- a/trunk/arch/x86/kernel/e820.c
+++ b/trunk/arch/x86/kernel/e820.c
@@ -703,7 +703,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
}
#endif
-#ifdef CONFIG_ACPI
+#ifdef CONFIG_HIBERNATION
/**
* Mark ACPI NVS memory region, so that we can save/restore it during
* hibernation and the subsequent resume.
@@ -716,7 +716,7 @@ static int __init e820_mark_nvs_memory(void)
struct e820entry *ei = &e820.map[i];
if (ei->type == E820_NVS)
- acpi_nvs_register(ei->addr, ei->size);
+ suspend_nvs_register(ei->addr, ei->size);
}
return 0;
diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S
index 79d97e68f042..4af9fd2450a5 100644
--- a/trunk/arch/x86/kernel/entry_32.S
+++ b/trunk/arch/x86/kernel/entry_32.S
@@ -42,7 +42,6 @@
*/
#include
-#include
#include
#include
#include
@@ -454,7 +453,7 @@ sysenter_audit:
movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
- call __audit_syscall_entry
+ call audit_syscall_entry
pushl_cfi %ebx
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
@@ -465,10 +464,11 @@ sysexit_audit:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */
- cmpl $-MAX_ERRNO,%eax /* is it an error ? */
- setbe %al /* 1 if so, 0 if not */
+ cmpl $0,%eax /* is it < 0? */
+ setl %al /* 1 if so, 0 if not */
movzbl %al,%eax /* zero-extend that */
- call __audit_syscall_exit
+ inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S
index 3fe8239fd8fb..940ba711fc28 100644
--- a/trunk/arch/x86/kernel/entry_64.S
+++ b/trunk/arch/x86/kernel/entry_64.S
@@ -55,7 +55,6 @@
#include
#include
#include
-#include
/* Avoid __ASSEMBLER__'ifying just for this. */
#include
@@ -549,7 +548,7 @@ badsys:
#ifdef CONFIG_AUDITSYSCALL
/*
* Fast path for syscall audit without full syscall trace.
- * We just call __audit_syscall_entry() directly, and then
+ * We just call audit_syscall_entry() directly, and then
* jump back to the normal fast path.
*/
auditsys:
@@ -559,21 +558,22 @@ auditsys:
movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
movq %rax,%rsi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
- call __audit_syscall_entry
+ call audit_syscall_entry
LOAD_ARGS 0 /* reload call-clobbered registers */
jmp system_call_fastpath
/*
- * Return fast path for syscall audit. Call __audit_syscall_exit()
+ * Return fast path for syscall audit. Call audit_syscall_exit()
* directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
* masked off.
*/
sysret_audit:
movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
- cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */
- setbe %al /* 1 if so, 0 if not */
+ cmpq $0,%rsi /* is it < 0? */
+ setl %al /* 1 if so, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
- call __audit_syscall_exit
+ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */
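The three assembly hunks above (ia32entry.S, entry_32.S and entry_64.S) replace the __audit_syscall_exit() fast path with the same short sequence: compare the syscall return value with zero, turn the sign bit into 0/1 with setl, then increment so that 1 means success and 2 means failure before calling audit_syscall_exit(). A rough C rendering of what that sequence computes, for illustration only (the AUDITSC_* values are assumed):

extern void audit_syscall_exit(int success, long return_code);

/* C equivalent of:  cmp $0,<ret> ; setl %al ; movzbl %al,%edi ; inc %edi */
static void sysret_audit_sketch(long retval)
{
	int success = (retval < 0) ? 2 : 1;	/* 2 == AUDITSC_FAILURE, 1 == AUDITSC_SUCCESS (assumed) */

	audit_syscall_exit(success, retval);	/* second arg: raw syscall return value */
}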
diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c
index 50267386b766..89a04c7b5bb6 100644
--- a/trunk/arch/x86/kernel/ptrace.c
+++ b/trunk/arch/x86/kernel/ptrace.c
@@ -1392,18 +1392,20 @@ long syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->orig_ax);
- if (IS_IA32)
- audit_syscall_entry(AUDIT_ARCH_I386,
- regs->orig_ax,
- regs->bx, regs->cx,
- regs->dx, regs->si);
+ if (unlikely(current->audit_context)) {
+ if (IS_IA32)
+ audit_syscall_entry(AUDIT_ARCH_I386,
+ regs->orig_ax,
+ regs->bx, regs->cx,
+ regs->dx, regs->si);
#ifdef CONFIG_X86_64
- else
- audit_syscall_entry(AUDIT_ARCH_X86_64,
- regs->orig_ax,
- regs->di, regs->si,
- regs->dx, regs->r10);
+ else
+ audit_syscall_entry(AUDIT_ARCH_X86_64,
+ regs->orig_ax,
+ regs->di, regs->si,
+ regs->dx, regs->r10);
#endif
+ }
return ret ?: regs->orig_ax;
}
@@ -1412,7 +1414,8 @@ void syscall_trace_leave(struct pt_regs *regs)
{
bool step;
- audit_syscall_exit(regs);
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
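Every C-language entry hook converted in this series follows the same shape: the previously unconditional audit_syscall_entry() call gains an unlikely(current->audit_context) guard so tasks without an audit context skip it entirely. A generic sketch of that pattern (the function name is made up; the arch value and argument plumbing vary per architecture):

#include <linux/audit.h>	/* audit_syscall_entry() */
#include <linux/sched.h>	/* current, unlikely() */

/* Sketch only: generic form of the guarded entry-side call. */
static void audited_syscall_enter_sketch(int arch, long nr,
					 long a0, long a1, long a2, long a3)
{
	if (unlikely(current->audit_context))
		audit_syscall_entry(arch, nr, a0, a1, a2, a3);
}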
diff --git a/trunk/arch/x86/kernel/vm86_32.c b/trunk/arch/x86/kernel/vm86_32.c
index b466cab5ba15..863f8753ab0a 100644
--- a/trunk/arch/x86/kernel/vm86_32.c
+++ b/trunk/arch/x86/kernel/vm86_32.c
@@ -335,11 +335,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);
- /*call __audit_syscall_exit since we do not exit via the normal paths */
-#ifdef CONFIG_AUDITSYSCALL
+ /*call audit_syscall_exit since we do not exit via the normal paths */
if (unlikely(current->audit_context))
- __audit_syscall_exit(1, 0);
-#endif
+ audit_syscall_exit(AUDITSC_RESULT(0), 0);
__asm__ __volatile__(
"movl %0,%%esp\n\t"
diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c
index 1c1c4f46a7c1..fd61b3fb7341 100644
--- a/trunk/arch/x86/mm/srat.c
+++ b/trunk/arch/x86/mm/srat.c
@@ -109,8 +109,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain_lo;
- if (acpi_srat_revision >= 2)
- pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -162,8 +160,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
- if (acpi_srat_revision <= 1)
- pxm &= 0xff;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
diff --git a/trunk/arch/x86/um/shared/sysdep/ptrace.h b/trunk/arch/x86/um/shared/sysdep/ptrace.h
index 2bbe1ec2d96a..711b1621747f 100644
--- a/trunk/arch/x86/um/shared/sysdep/ptrace.h
+++ b/trunk/arch/x86/um/shared/sysdep/ptrace.h
@@ -1,15 +1,5 @@
-#ifndef __SYSDEP_X86_PTRACE_H
-#define __SYSDEP_X86_PTRACE_H
-
#ifdef __i386__
#include "ptrace_32.h"
#else
#include "ptrace_64.h"
#endif
-
-static inline long regs_return_value(struct uml_pt_regs *regs)
-{
- return UPT_SYSCALL_RET(regs);
-}
-
-#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/trunk/arch/xtensa/kernel/ptrace.c b/trunk/arch/xtensa/kernel/ptrace.c
index 2dff698ab02e..a0d042aa2967 100644
--- a/trunk/arch/xtensa/kernel/ptrace.c
+++ b/trunk/arch/xtensa/kernel/ptrace.c
@@ -334,7 +334,8 @@ void do_syscall_trace_enter(struct pt_regs *regs)
do_syscall_trace();
#if 0
- audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
#endif
}
diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index ee55019066a1..163263ddd381 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -3117,17 +3117,18 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ struct cfq_queue *old_cfqq = cfqd->active_queue;
+
cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
/*
* workload type is changed, don't save slice, otherwise preempt
* doesn't happen
*/
- if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+ if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
cfqq->cfqg->saved_workload_slice = 0;
- cfq_slice_expired(cfqd, 1);
-
/*
* Put the new queue at the front of the of the current list,
* so we know that it will be selected next.
diff --git a/trunk/drivers/acpi/Makefile b/trunk/drivers/acpi/Makefile
index c07f44f05f9d..ecb26b4f29a0 100644
--- a/trunk/drivers/acpi/Makefile
+++ b/trunk/drivers/acpi/Makefile
@@ -20,12 +20,11 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
acpi-y += atomicio.o
-acpi-y += nvs.o
# sleep related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
#
diff --git a/trunk/drivers/acpi/acpica/Makefile b/trunk/drivers/acpi/acpica/Makefile
index 0ca208b6dcf0..301bd2d388ad 100644
--- a/trunk/drivers/acpi/acpica/Makefile
+++ b/trunk/drivers/acpi/acpica/Makefile
@@ -8,151 +8,41 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
-acpi-y := \
- dsargs.o \
- dscontrol.o \
- dsfield.o \
- dsinit.o \
- dsmethod.o \
- dsmthdat.o \
- dsobject.o \
- dsopcode.o \
- dsutils.o \
- dswexec.o \
- dswload.o \
- dswload2.o \
- dswscope.o \
- dswstate.o
+acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
+ dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
+ dsinit.o dsargs.o dscontrol.o dswload2.o
-acpi-y += \
- evevent.o \
- evgpe.o \
- evgpeblk.o \
- evgpeinit.o \
- evgpeutil.o \
- evglock.o \
- evmisc.o \
- evregion.o \
- evrgnini.o \
- evsci.o \
- evxface.o \
- evxfevnt.o \
- evxfgpe.o \
- evxfregn.o
+acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
+ evmisc.o evrgnini.o evxface.o evxfregn.o \
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
-acpi-y += \
- exconfig.o \
- exconvrt.o \
- excreate.o \
- exdebug.o \
- exdump.o \
- exfield.o \
- exfldio.o \
- exmutex.o \
- exnames.o \
- exoparg1.o \
- exoparg2.o \
- exoparg3.o \
- exoparg6.o \
- exprep.o \
- exmisc.o \
- exregion.o \
- exresnte.o \
- exresolv.o \
- exresop.o \
- exstore.o \
- exstoren.o \
- exstorob.o \
- exsystem.o \
- exutils.o
+acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
+ exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
+ excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
-acpi-y += \
- hwacpi.o \
- hwgpe.o \
- hwpci.o \
- hwregs.o \
- hwsleep.o \
- hwvalid.o \
- hwxface.o
+acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
-acpi-y += \
- nsaccess.o \
- nsalloc.o \
- nsdump.o \
- nseval.o \
- nsinit.o \
- nsload.o \
- nsnames.o \
- nsobject.o \
- nsparse.o \
- nspredef.o \
- nsrepair.o \
- nsrepair2.o \
- nssearch.o \
- nsutils.o \
- nswalk.o \
- nsxfeval.o \
- nsxfname.o \
- nsxfobj.o
+acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
+ nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
+ nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
+ nsparse.o nspredef.o nsrepair.o nsrepair2.o
acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-acpi-y += \
- psargs.o \
- psloop.o \
- psopcode.o \
- psparse.o \
- psscope.o \
- pstree.o \
- psutils.o \
- pswalk.o \
- psxface.o
+acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
+ psopcode.o psscope.o psutils.o psxface.o
-acpi-y += \
- rsaddr.o \
- rscalc.o \
- rscreate.o \
- rsinfo.o \
- rsio.o \
- rsirq.o \
- rslist.o \
- rsmemory.o \
- rsmisc.o \
- rsserial.o \
- rsutils.o \
- rsxface.o
+acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+ rscalc.o rsirq.o rsmemory.o rsutils.o
acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
-acpi-y += \
- tbfadt.o \
- tbfind.o \
- tbinstal.o \
- tbutils.o \
- tbxface.o \
- tbxfroot.o
+acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
-acpi-y += \
- utaddress.o \
- utalloc.o \
- utcopy.o \
- utdebug.o \
- utdecode.o \
- utdelete.o \
- uteval.o \
- utglobal.o \
- utids.o \
- utinit.o \
- utlock.o \
- utmath.o \
- utmisc.o \
- utmutex.o \
- utobject.o \
- utosi.o \
- utresrc.o \
- utstate.o \
- utxface.o \
- utxferror.o \
- utxfmutex.o
+acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+ utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
+ utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
+ utosi.o utxferror.o utdecode.o
diff --git a/trunk/drivers/acpi/acpica/accommon.h b/trunk/drivers/acpi/acpica/accommon.h
index a44bd424f9f4..e0ba17f0a7c8 100644
--- a/trunk/drivers/acpi/acpica/accommon.h
+++ b/trunk/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acconfig.h b/trunk/drivers/acpi/acpica/acconfig.h
index 1f30af613e87..f895a244ca7e 100644
--- a/trunk/drivers/acpi/acpica/acconfig.h
+++ b/trunk/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,10 +123,6 @@
#define ACPI_MAX_SLEEP 2000 /* Two seconds */
-/* Address Range lists are per-space_id (Memory and I/O only) */
-
-#define ACPI_ADDRESS_RANGE_MAX 2
-
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -206,10 +202,9 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus, GSBus and IPMI bidirectional buffer size */
+/* SMBus and IPMI bidirectional buffer size */
#define ACPI_SMBUS_BUFFER_SIZE 34
-#define ACPI_GSBUS_BUFFER_SIZE 34
#define ACPI_IPMI_BUFFER_SIZE 66
/* _sx_d and _sx_w control methods */
diff --git a/trunk/drivers/acpi/acpica/acdebug.h b/trunk/drivers/acpi/acpica/acdebug.h
index deaa81979561..eb0b1f8dee6d 100644
--- a/trunk/drivers/acpi/acpica/acdebug.h
+++ b/trunk/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acdispat.h b/trunk/drivers/acpi/acpica/acdispat.h
index 5935ba6707e2..2d1b7ffa377a 100644
--- a/trunk/drivers/acpi/acpica/acdispat.h
+++ b/trunk/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acevents.h b/trunk/drivers/acpi/acpica/acevents.h
index c53caa521a30..bea3b4899183 100644
--- a/trunk/drivers/acpi/acpica/acevents.h
+++ b/trunk/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,7 +162,6 @@ acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
- union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value);
diff --git a/trunk/drivers/acpi/acpica/acglobal.h b/trunk/drivers/acpi/acpica/acglobal.h
index 2853f7673f3b..e6652d716e45 100644
--- a/trunk/drivers/acpi/acpica/acglobal.h
+++ b/trunk/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -140,19 +140,8 @@ u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
u8 acpi_gbl_system_awake_and_running;
-/*
- * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
- * that the ACPI hardware is no longer required. A flag in the FADT indicates
- * a reduced HW machine, and that flag is duplicated here for convenience.
- */
-u8 acpi_gbl_reduced_hardware;
-
#endif
-/* Do not disassemble buffers to resource descriptors */
-
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
-
/*****************************************************************************
*
* Debug support
@@ -218,7 +207,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exlusion within ACPICA subsystem
*
****************************************************************************/
@@ -306,8 +295,6 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
ACPI_EXTERN u8 acpi_gbl_osi_data;
ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
-ACPI_EXTERN struct acpi_address_range
- *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
#ifndef DEFINE_ACPI_GLOBALS
diff --git a/trunk/drivers/acpi/acpica/achware.h b/trunk/drivers/acpi/acpica/achware.h
index 677793e938f5..e7213beaafc7 100644
--- a/trunk/drivers/acpi/acpica/achware.h
+++ b/trunk/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acinterp.h b/trunk/drivers/acpi/acpica/acinterp.h
index eb308635da72..3731e1c34b83 100644
--- a/trunk/drivers/acpi/acpica/acinterp.h
+++ b/trunk/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -468,8 +468,6 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
void acpi_ex_integer_to_string(char *dest, u64 value);
-u8 acpi_is_valid_space_id(u8 space_id);
-
/*
* exregion - default op_region handlers
*/
diff --git a/trunk/drivers/acpi/acpica/aclocal.h b/trunk/drivers/acpi/acpica/aclocal.h
index 3f24068837d5..5552125d8340 100644
--- a/trunk/drivers/acpi/acpica/aclocal.h
+++ b/trunk/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x81
+#define AML_NUM_OPCODES 0x7F
/* Forward declarations */
@@ -249,16 +249,12 @@ struct acpi_create_field_info {
struct acpi_namespace_node *field_node;
struct acpi_namespace_node *register_node;
struct acpi_namespace_node *data_register_node;
- struct acpi_namespace_node *connection_node;
- u8 *resource_buffer;
u32 bank_value;
u32 field_bit_position;
u32 field_bit_length;
- u16 resource_length;
u8 field_flags;
u8 attribute;
u8 field_type;
- u8 access_length;
};
typedef
@@ -319,8 +315,7 @@ struct acpi_name_info {
/*
* Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
- * ACPI_PTYPE2_FIX_VAR
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
*/
struct acpi_package_info {
u8 type;
@@ -630,15 +625,6 @@ union acpi_generic_state {
typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
-/* Address Range info block */
-
-struct acpi_address_range {
- struct acpi_address_range *next;
- struct acpi_namespace_node *region_node;
- acpi_physical_address start_address;
- acpi_physical_address end_address;
-};
-
/*****************************************************************************
*
* Parser typedefs and structs
@@ -965,7 +951,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
#define ACPI_RESOURCE_NAME_IO 0x40
#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
-#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50
+#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50
#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
@@ -987,9 +973,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
-#define ACPI_RESOURCE_NAME_GPIO 0x8C
-#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B
/*****************************************************************************
*
diff --git a/trunk/drivers/acpi/acpica/acmacros.h b/trunk/drivers/acpi/acpica/acmacros.h
index ef338a96f5b2..b7491ee1fba6 100644
--- a/trunk/drivers/acpi/acpica/acmacros.h
+++ b/trunk/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acnamesp.h b/trunk/drivers/acpi/acpica/acnamesp.h
index 2c9e0f049523..79a598c67fe3 100644
--- a/trunk/drivers/acpi/acpica/acnamesp.h
+++ b/trunk/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acobject.h b/trunk/drivers/acpi/acpica/acobject.h
index c065078ca83b..1055769f2f01 100644
--- a/trunk/drivers/acpi/acpica/acobject.h
+++ b/trunk/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,7 +254,6 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_length; /* For serial regions/fields */
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -262,9 +261,7 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
- union acpi_operand_object *region_obj; /* Containing op_region object */
- u8 *resource_buffer; /* resource_template for serial regions/fields */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
};
struct acpi_object_bank_field {
@@ -361,7 +358,6 @@ typedef enum {
*/
struct acpi_object_extra {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
- struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
u32 aml_length;
diff --git a/trunk/drivers/acpi/acpica/acopcode.h b/trunk/drivers/acpi/acpica/acopcode.h
index 9440d053fbb3..bb2ccfad7376 100644
--- a/trunk/drivers/acpi/acpica/acopcode.h
+++ b/trunk/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,7 +93,6 @@
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
-#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -165,7 +164,6 @@
#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_REVISION_OP ARG_NONE
#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
-#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
@@ -225,7 +223,6 @@
#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
-#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -297,7 +294,6 @@
#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
#define ARGI_REVISION_OP ARG_NONE
#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
-#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
diff --git a/trunk/drivers/acpi/acpica/acparser.h b/trunk/drivers/acpi/acpica/acparser.h
index b725d780d34d..5ea1e06afa20 100644
--- a/trunk/drivers/acpi/acpica/acparser.h
+++ b/trunk/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acpredef.h b/trunk/drivers/acpi/acpica/acpredef.h
index bbb34c9be4e8..c445cca490ea 100644
--- a/trunk/drivers/acpi/acpica/acpredef.h
+++ b/trunk/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,14 +94,6 @@
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
* (Used for _ART, _FPS)
*
- * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
- * followed by an optional element
- * object type
- * count
- * object type
- * count = 0 (optional)
- * (Used for _DLM)
- *
*****************************************************************************/
enum acpi_return_package_types {
@@ -113,8 +105,7 @@ enum acpi_return_package_types {
ACPI_PTYPE2_PKG_COUNT = 6,
ACPI_PTYPE2_FIXED = 7,
ACPI_PTYPE2_MIN = 8,
- ACPI_PTYPE2_REV_FIXED = 9,
- ACPI_PTYPE2_FIX_VAR = 10
+ ACPI_PTYPE2_REV_FIXED = 9
};
#ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -163,7 +154,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
- {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -239,13 +229,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
- {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
-
- {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
- 0}},
-
{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -254,21 +237,12 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
{{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
- {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
{{"_DDN", 0, ACPI_RTYPE_STRING}},
- {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
{{"_DIS", 0, 0}},
-
- {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
- {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
- ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
-
{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -288,7 +262,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_EJ3", 1, 0}},
{{"_EJ4", 1, 0}},
{{"_EJD", 0, ACPI_RTYPE_STRING}},
- {{"_EVT", 1, 0}},
{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -308,17 +281,14 @@ static const union acpi_predefined_info predefined_names[] =
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
- {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
- {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
{{"_GTS", 1, 0}},
- {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -333,7 +303,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
{{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
- {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_INI", 0, 0}},
{{"_IRC", 0, 0}},
@@ -392,9 +361,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
- {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
-
{{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
@@ -425,7 +391,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
- {{"_PSE", 1, 0}},
{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -492,7 +457,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
{{"_SRS", 1, 0}},
- {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_SST", 1, 0}},
{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -500,7 +464,6 @@ static const union acpi_predefined_info predefined_names[] =
{{"_STP", 2, ACPI_RTYPE_INTEGER}},
{{"_STR", 0, ACPI_RTYPE_BUFFER}},
{{"_STV", 2, ACPI_RTYPE_INTEGER}},
- {{"_SUB", 0, ACPI_RTYPE_STRING}},
{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/trunk/drivers/acpi/acpica/acresrc.h b/trunk/drivers/acpi/acpica/acresrc.h
index 0347d0993497..f08b55b7f3a0 100644
--- a/trunk/drivers/acpi/acpica/acresrc.h
+++ b/trunk/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,40 +73,28 @@ typedef const struct acpi_rsconvert_info {
/* Resource conversion opcodes */
-typedef enum {
- ACPI_RSC_INITGET = 0,
- ACPI_RSC_INITSET,
- ACPI_RSC_FLAGINIT,
- ACPI_RSC_1BITFLAG,
- ACPI_RSC_2BITFLAG,
- ACPI_RSC_3BITFLAG,
- ACPI_RSC_ADDRESS,
- ACPI_RSC_BITMASK,
- ACPI_RSC_BITMASK16,
- ACPI_RSC_COUNT,
- ACPI_RSC_COUNT16,
- ACPI_RSC_COUNT_GPIO_PIN,
- ACPI_RSC_COUNT_GPIO_RES,
- ACPI_RSC_COUNT_GPIO_VEN,
- ACPI_RSC_COUNT_SERIAL_RES,
- ACPI_RSC_COUNT_SERIAL_VEN,
- ACPI_RSC_DATA8,
- ACPI_RSC_EXIT_EQ,
- ACPI_RSC_EXIT_LE,
- ACPI_RSC_EXIT_NE,
- ACPI_RSC_LENGTH,
- ACPI_RSC_MOVE_GPIO_PIN,
- ACPI_RSC_MOVE_GPIO_RES,
- ACPI_RSC_MOVE_SERIAL_RES,
- ACPI_RSC_MOVE_SERIAL_VEN,
- ACPI_RSC_MOVE8,
- ACPI_RSC_MOVE16,
- ACPI_RSC_MOVE32,
- ACPI_RSC_MOVE64,
- ACPI_RSC_SET8,
- ACPI_RSC_SOURCE,
- ACPI_RSC_SOURCEX
-} ACPI_RSCONVERT_OPCODES;
+#define ACPI_RSC_INITGET 0
+#define ACPI_RSC_INITSET 1
+#define ACPI_RSC_FLAGINIT 2
+#define ACPI_RSC_1BITFLAG 3
+#define ACPI_RSC_2BITFLAG 4
+#define ACPI_RSC_COUNT 5
+#define ACPI_RSC_COUNT16 6
+#define ACPI_RSC_LENGTH 7
+#define ACPI_RSC_MOVE8 8
+#define ACPI_RSC_MOVE16 9
+#define ACPI_RSC_MOVE32 10
+#define ACPI_RSC_MOVE64 11
+#define ACPI_RSC_SET8 12
+#define ACPI_RSC_DATA8 13
+#define ACPI_RSC_ADDRESS 14
+#define ACPI_RSC_SOURCE 15
+#define ACPI_RSC_SOURCEX 16
+#define ACPI_RSC_BITMASK 17
+#define ACPI_RSC_BITMASK16 18
+#define ACPI_RSC_EXIT_NE 19
+#define ACPI_RSC_EXIT_LE 20
+#define ACPI_RSC_EXIT_EQ 21
/* Resource Conversion sub-opcodes */
@@ -118,9 +106,6 @@ typedef enum {
#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
-/*
- * Individual entry for the resource dump tables
- */
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
@@ -131,25 +116,20 @@ typedef const struct acpi_rsdump_info {
/* Values for the Opcode field above */
-typedef enum {
- ACPI_RSD_TITLE = 0,
- ACPI_RSD_1BITFLAG,
- ACPI_RSD_2BITFLAG,
- ACPI_RSD_3BITFLAG,
- ACPI_RSD_ADDRESS,
- ACPI_RSD_DWORDLIST,
- ACPI_RSD_LITERAL,
- ACPI_RSD_LONGLIST,
- ACPI_RSD_SHORTLIST,
- ACPI_RSD_SHORTLISTX,
- ACPI_RSD_SOURCE,
- ACPI_RSD_STRING,
- ACPI_RSD_UINT8,
- ACPI_RSD_UINT16,
- ACPI_RSD_UINT32,
- ACPI_RSD_UINT64,
- ACPI_RSD_WORDLIST
-} ACPI_RSDUMP_OPCODES;
+#define ACPI_RSD_TITLE 0
+#define ACPI_RSD_LITERAL 1
+#define ACPI_RSD_STRING 2
+#define ACPI_RSD_UINT8 3
+#define ACPI_RSD_UINT16 4
+#define ACPI_RSD_UINT32 5
+#define ACPI_RSD_UINT64 6
+#define ACPI_RSD_1BITFLAG 7
+#define ACPI_RSD_2BITFLAG 8
+#define ACPI_RSD_SHORTLIST 9
+#define ACPI_RSD_LONGLIST 10
+#define ACPI_RSD_DWORDLIST 11
+#define ACPI_RSD_ADDRESS 12
+#define ACPI_RSD_SOURCE 13
/* restore default alignment */
@@ -158,18 +138,13 @@ typedef enum {
/* Resource tables indexed by internal resource type */
extern const u8 acpi_gbl_aml_resource_sizes[];
-extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
/* Resource tables indexed by raw AML resource descriptor type */
extern const u8 acpi_gbl_resource_struct_sizes[];
-extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
-extern struct acpi_rsconvert_info
- *acpi_gbl_convert_resource_serial_bus_dispatch[];
-
struct acpi_vendor_walk_info {
struct acpi_vendor_uuid *uuid;
struct acpi_buffer *buffer;
@@ -215,10 +190,6 @@ acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
struct acpi_buffer *ret_buffer);
-acpi_status
-acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
- struct acpi_buffer *ret_buffer);
-
/*
* rscalc
*/
@@ -322,11 +293,6 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
-extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
-extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
-extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
-extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
-extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
/* These resources require separate get/set tables */
@@ -344,7 +310,6 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
* rsinfo
*/
extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
-extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
* rsdump
@@ -366,12 +331,6 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
-extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
-extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
-extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
-extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
-extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
-extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/trunk/drivers/acpi/acpica/acstruct.h b/trunk/drivers/acpi/acpica/acstruct.h
index 0404df605bc1..1623b245dde2 100644
--- a/trunk/drivers/acpi/acpica/acstruct.h
+++ b/trunk/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/actables.h b/trunk/drivers/acpi/acpica/actables.h
index d5bec304c823..967f08124eba 100644
--- a/trunk/drivers/acpi/acpica/actables.h
+++ b/trunk/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/acutils.h b/trunk/drivers/acpi/acpica/acutils.h
index 925ccf22101b..99c140d8e348 100644
--- a/trunk/drivers/acpi/acpica/acutils.h
+++ b/trunk/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
#define _ACUTILS_H
extern const u8 acpi_gbl_resource_aml_sizes[];
-extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
@@ -579,24 +578,6 @@ acpi_ut_create_list(char *list_name,
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-/*
- * utaddress - address range check
- */
-acpi_status
-acpi_ut_add_address_range(acpi_adr_space_type space_id,
- acpi_physical_address address,
- u32 length, struct acpi_namespace_node *region_node);
-
-void
-acpi_ut_remove_address_range(acpi_adr_space_type space_id,
- struct acpi_namespace_node *region_node);
-
-u32
-acpi_ut_check_address_range(acpi_adr_space_type space_id,
- acpi_physical_address address, u32 length, u8 warn);
-
-void acpi_ut_delete_address_lists(void);
-
/*
* utxferror - various error/warning output functions
*/
diff --git a/trunk/drivers/acpi/acpica/amlcode.h b/trunk/drivers/acpi/acpica/amlcode.h
index 905280fec0fa..1077f17859ed 100644
--- a/trunk/drivers/acpi/acpica/amlcode.h
+++ b/trunk/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -188,14 +188,6 @@
#define AML_LLESSEQUAL_OP (u16) 0x9294
#define AML_LNOTEQUAL_OP (u16) 0x9293
-/*
- * Opcodes for "Field" operators
- */
-#define AML_FIELD_OFFSET_OP (u8) 0x00
-#define AML_FIELD_ACCESS_OP (u8) 0x01
-#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
-#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
-
/*
* Internal opcodes
* Use only "Unknown" AML opcodes, don't attempt to use
@@ -210,8 +202,6 @@
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
-#define AML_INT_CONNECTION_OP (u16) 0x0038
-#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
#define ARG_NONE 0x0
@@ -466,16 +456,13 @@ typedef enum {
* access_as keyword
*/
typedef enum {
- AML_FIELD_ATTRIB_QUICK = 0x02,
- AML_FIELD_ATTRIB_SEND_RCV = 0x04,
- AML_FIELD_ATTRIB_BYTE = 0x06,
- AML_FIELD_ATTRIB_WORD = 0x08,
- AML_FIELD_ATTRIB_BLOCK = 0x0A,
- AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
- AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
- AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
- AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
- AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
+ AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
+ AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
+ AML_FIELD_ATTRIB_SMB_WORD = 0x08,
+ AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
+ AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
+ AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
} AML_ACCESS_ATTRIBUTE;
/* Bit fields in the AML method_flags byte */
diff --git a/trunk/drivers/acpi/acpica/amlresrc.h b/trunk/drivers/acpi/acpica/amlresrc.h
index 7b2128f274e7..59122cde247c 100644
--- a/trunk/drivers/acpi/acpica/amlresrc.h
+++ b/trunk/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,48 +58,29 @@
#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
#define ACPI_RESTAG_BASEADDRESS "_BAS"
#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
-#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
#define ACPI_RESTAG_DECODE "_DEC"
-#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
#define ACPI_RESTAG_DMA "_DMA"
#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
-#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
-#define ACPI_RESTAG_ENDIANNESS "_END"
-#define ACPI_RESTAG_FLOWCONTROL "_FLC"
#define ACPI_RESTAG_GRANULARITY "_GRA"
#define ACPI_RESTAG_INTERRUPT "_INT"
#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
-#define ACPI_RESTAG_IORESTRICTION "_IOR"
#define ACPI_RESTAG_LENGTH "_LEN"
-#define ACPI_RESTAG_LINE "_LIN"
#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
#define ACPI_RESTAG_MAXADDR "_MAX"
#define ACPI_RESTAG_MINADDR "_MIN"
#define ACPI_RESTAG_MAXTYPE "_MAF"
#define ACPI_RESTAG_MINTYPE "_MIF"
-#define ACPI_RESTAG_MODE "_MOD"
-#define ACPI_RESTAG_PARITY "_PAR"
-#define ACPI_RESTAG_PHASE "_PHA"
-#define ACPI_RESTAG_PIN "_PIN"
-#define ACPI_RESTAG_PINCONFIG "_PPI"
-#define ACPI_RESTAG_POLARITY "_POL"
#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
#define ACPI_RESTAG_RANGETYPE "_RNG"
#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
-#define ACPI_RESTAG_LENGTH_RX "_RXL"
-#define ACPI_RESTAG_LENGTH_TX "_TXL"
-#define ACPI_RESTAG_SLAVEMODE "_SLV"
-#define ACPI_RESTAG_SPEED "_SPE"
-#define ACPI_RESTAG_STOPBITS "_STB"
#define ACPI_RESTAG_TRANSLATION "_TRA"
#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
-#define ACPI_RESTAG_VENDORDATA "_VEN"
/* Default sizes for "small" resource descriptors */
@@ -109,7 +90,6 @@
#define ASL_RDESC_END_DEPEND_SIZE 0x00
#define ASL_RDESC_IO_SIZE 0x07
#define ASL_RDESC_FIXED_IO_SIZE 0x03
-#define ASL_RDESC_FIXED_DMA_SIZE 0x05
#define ASL_RDESC_END_TAG_SIZE 0x01
struct asl_resource_node {
@@ -184,12 +164,6 @@ struct aml_resource_end_tag {
AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
};
-struct aml_resource_fixed_dma {
- AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
- u16 channels;
- u8 width;
-};
-
/*
* LARGE descriptors
*/
@@ -289,110 +263,6 @@ struct aml_resource_generic_register {
u64 address;
};
-/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
-
-struct aml_resource_gpio {
- AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
- u8 connection_type;
- u16 flags;
- u16 int_flags;
- u8 pin_config;
- u16 drive_strength;
- u16 debounce_timeout;
- u16 pin_table_offset;
- u8 res_source_index;
- u16 res_source_offset;
- u16 vendor_offset;
- u16 vendor_length;
- /*
- * Optional fields follow immediately:
- * 1) PIN list (Words)
- * 2) Resource Source String
- * 3) Vendor Data bytes
- */
-};
-
-#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
-
-/* Values for connection_type above */
-
-#define AML_RESOURCE_GPIO_TYPE_INT 0
-#define AML_RESOURCE_GPIO_TYPE_IO 1
-#define AML_RESOURCE_MAX_GPIOTYPE 1
-
-/* Common preamble for all serial descriptors (ACPI 5.0) */
-
-#define AML_RESOURCE_SERIAL_COMMON \
- u8 revision_id; \
- u8 res_source_index; \
- u8 type; \
- u8 flags; \
- u16 type_specific_flags; \
- u8 type_revision_id; \
- u16 type_data_length; \
-
-/* Values for the type field above */
-
-#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
-#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
-#define AML_RESOURCE_UART_SERIALBUSTYPE 3
-#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
-#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
-
-struct aml_resource_common_serialbus {
-AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
-
-struct aml_resource_i2c_serialbus {
- AML_RESOURCE_LARGE_HEADER_COMMON
- AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
- u16 slave_address;
- /*
- * Optional fields follow immediately:
- * 1) Vendor Data bytes
- * 2) Resource Source String
- */
-};
-
-#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
-
-struct aml_resource_spi_serialbus {
- AML_RESOURCE_LARGE_HEADER_COMMON
- AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
- u8 data_bit_length;
- u8 clock_phase;
- u8 clock_polarity;
- u16 device_selection;
- /*
- * Optional fields follow immediately:
- * 1) Vendor Data bytes
- * 2) Resource Source String
- */
-};
-
-#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
-
-struct aml_resource_uart_serialbus {
- AML_RESOURCE_LARGE_HEADER_COMMON
- AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
- u16 rx_fifo_size;
- u16 tx_fifo_size;
- u8 parity;
- u8 lines_enabled;
- /*
- * Optional fields follow immediately:
- * 1) Vendor Data bytes
- * 2) Resource Source String
- */
-};
-
-#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
-#define AML_RESOURCE_UART_MIN_DATA_LEN 10
-
/* restore default alignment */
#pragma pack()
@@ -414,7 +284,6 @@ union aml_resource {
struct aml_resource_end_dependent end_dpf;
struct aml_resource_io io;
struct aml_resource_fixed_io fixed_io;
- struct aml_resource_fixed_dma fixed_dma;
struct aml_resource_vendor_small vendor_small;
struct aml_resource_end_tag end_tag;
@@ -430,11 +299,6 @@ union aml_resource {
struct aml_resource_address64 address64;
struct aml_resource_extended_address64 ext_address64;
struct aml_resource_extended_irq extended_irq;
- struct aml_resource_gpio gpio;
- struct aml_resource_i2c_serialbus i2c_serial_bus;
- struct aml_resource_spi_serialbus spi_serial_bus;
- struct aml_resource_uart_serialbus uart_serial_bus;
- struct aml_resource_common_serialbus common_serial_bus;
/* Utility overlays */
diff --git a/trunk/drivers/acpi/acpica/dsargs.c b/trunk/drivers/acpi/acpica/dsargs.c
index 80eb1900297f..8c7b99728aa2 100644
--- a/trunk/drivers/acpi/acpica/dsargs.c
+++ b/trunk/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -250,13 +250,6 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_ut_add_address_range(obj_desc->region.space_id,
- obj_desc->region.address,
- obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
@@ -391,15 +384,8 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
- status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
+ status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_ut_add_address_range(obj_desc->region.space_id,
- obj_desc->region.address,
- obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
diff --git a/trunk/drivers/acpi/acpica/dscontrol.c b/trunk/drivers/acpi/acpica/dscontrol.c
index effe4ca1133f..26c49fff58da 100644
--- a/trunk/drivers/acpi/acpica/dscontrol.c
+++ b/trunk/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsfield.c b/trunk/drivers/acpi/acpica/dsfield.c
index cd243cf2cab2..34be60c0e448 100644
--- a/trunk/drivers/acpi/acpica/dsfield.c
+++ b/trunk/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,7 +221,6 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
{
acpi_status status;
u64 position;
- union acpi_parse_object *child;
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
@@ -233,11 +232,10 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
while (arg) {
/*
- * Four types of field elements are handled:
- * 1) Name - Enters a new named field into the namespace
- * 2) Offset - specifies a bit offset
- * 3) access_as - changes the access mode/attributes
- * 4) Connection - Associate a resource template with the field
+ * Three types of field elements are handled:
+ * 1) Offset - specifies a bit offset
+ * 2) access_as - changes the access mode
+ * 3) Name - Enters a new named field into the namespace
*/
switch (arg->common.aml_opcode) {
case AML_INT_RESERVEDFIELD_OP:
@@ -255,70 +253,21 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
break;
case AML_INT_ACCESSFIELD_OP:
- case AML_INT_EXTACCESSFIELD_OP:
+
/*
- * Get new access_type, access_attribute, and access_length fields
- * -- to be used for all field units that follow, until the
- * end-of-field or another access_as keyword is encountered.
- * NOTE. These three bytes are encoded in the integer value
- * of the parseop for convenience.
+ * Get a new access_type and access_attribute -- to be used for all
+ * field units that follow, until field end or another access_as
+ * keyword.
*
* In field_flags, preserve the flag bits other than the
- * ACCESS_TYPE bits.
+ * ACCESS_TYPE bits
*/
-
- /* access_type (byte_acc, word_acc, etc.) */
-
info->field_flags = (u8)
((info->
field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
- ((u8)((u32)(arg->common.value.integer & 0x07))));
-
- /* access_attribute (attrib_quick, attrib_byte, etc.) */
-
- info->attribute =
- (u8)((arg->common.value.integer >> 8) & 0xFF);
-
- /* access_length (for serial/buffer protocols) */
-
- info->access_length =
- (u8)((arg->common.value.integer >> 16) & 0xFF);
- break;
-
- case AML_INT_CONNECTION_OP:
- /*
- * Clear any previous connection. New connection is used for all
- * fields that follow, similar to access_as
- */
- info->resource_buffer = NULL;
- info->connection_node = NULL;
+ ((u8) ((u32) arg->common.value.integer >> 8)));
- /*
- * A Connection() is either an actual resource descriptor (buffer)
- * or a named reference to a resource template
- */
- child = arg->common.value.arg;
- if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
- info->resource_buffer = child->named.data;
- info->resource_length =
- (u16)child->named.value.integer;
- } else {
- /* Lookup the Connection() namepath, it should already exist */
-
- status = acpi_ns_lookup(walk_state->scope_info,
- child->common.value.
- name, ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE,
- ACPI_NS_DONT_OPEN_SCOPE,
- walk_state,
- &info->connection_node);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
- value.name,
- status);
- return_ACPI_STATUS(status);
- }
- }
+ info->attribute = (u8) (arg->common.value.integer);
break;
case AML_INT_NAMEDFIELD_OP:
@@ -425,8 +374,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
}
}
- ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
-
/* Second arg is the field flags */
arg = arg->common.next;
@@ -439,6 +386,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
return_ACPI_STATUS(status);
}
@@ -526,8 +474,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
*/
while (arg) {
/*
- * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
- * in the field names in order to enter them into the namespace.
+ * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
+ * field names in order to enter them into the namespace.
*/
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
status = acpi_ns_lookup(walk_state->scope_info,
@@ -703,5 +651,6 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
return_ACPI_STATUS(status);
}
diff --git a/trunk/drivers/acpi/acpica/dsinit.c b/trunk/drivers/acpi/acpica/dsinit.c
index 9e5ac7f780a7..a7718bf2b9a1 100644
--- a/trunk/drivers/acpi/acpica/dsinit.c
+++ b/trunk/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsmethod.c b/trunk/drivers/acpi/acpica/dsmethod.c
index 00f5dab5bcc0..5d797751e205 100644
--- a/trunk/drivers/acpi/acpica/dsmethod.c
+++ b/trunk/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsmthdat.c b/trunk/drivers/acpi/acpica/dsmthdat.c
index b40bd507be5d..905ce29a92e1 100644
--- a/trunk/drivers/acpi/acpica/dsmthdat.c
+++ b/trunk/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsobject.c b/trunk/drivers/acpi/acpica/dsobject.c
index d7045ca3e32a..f42e17e5c252 100644
--- a/trunk/drivers/acpi/acpica/dsobject.c
+++ b/trunk/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsopcode.c b/trunk/drivers/acpi/acpica/dsopcode.c
index e5eff7585102..c627a288e027 100644
--- a/trunk/drivers/acpi/acpica/dsopcode.c
+++ b/trunk/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dsutils.c b/trunk/drivers/acpi/acpica/dsutils.c
index 1abcda31037f..2c477ce172fa 100644
--- a/trunk/drivers/acpi/acpica/dsutils.c
+++ b/trunk/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswexec.c b/trunk/drivers/acpi/acpica/dswexec.c
index 642f3c053e87..fe40e4c6554f 100644
--- a/trunk/drivers/acpi/acpica/dswexec.c
+++ b/trunk/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswload.c b/trunk/drivers/acpi/acpica/dswload.c
index 552aa3a50c84..324acec1179a 100644
--- a/trunk/drivers/acpi/acpica/dswload.c
+++ b/trunk/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswload2.c b/trunk/drivers/acpi/acpica/dswload2.c
index ae7147724763..976318138c56 100644
--- a/trunk/drivers/acpi/acpica/dswload2.c
+++ b/trunk/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswscope.c b/trunk/drivers/acpi/acpica/dswscope.c
index 9e9490a9cbf0..76a661fc1e09 100644
--- a/trunk/drivers/acpi/acpica/dswscope.c
+++ b/trunk/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/dswstate.c b/trunk/drivers/acpi/acpica/dswstate.c
index c9c2ac13e7cc..a6c374ef9914 100644
--- a/trunk/drivers/acpi/acpica/dswstate.c
+++ b/trunk/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evevent.c b/trunk/drivers/acpi/acpica/evevent.c
index 6729ebe2f1e6..d458b041e651 100644
--- a/trunk/drivers/acpi/acpica/evevent.c
+++ b/trunk/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,12 +71,6 @@ acpi_status acpi_ev_initialize_events(void)
ACPI_FUNCTION_TRACE(ev_initialize_events);
- /* If Hardware Reduced flag is set, there are no fixed events */
-
- if (acpi_gbl_reduced_hardware) {
- return_ACPI_STATUS(AE_OK);
- }
-
/*
* Initialize the Fixed and General Purpose Events. This is done prior to
* enabling SCIs to prevent interrupts from occurring before the handlers
@@ -117,12 +111,6 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
- /* If Hardware Reduced flag is set, there is no ACPI h/w */
-
- if (acpi_gbl_reduced_hardware) {
- return_ACPI_STATUS(AE_OK);
- }
-
/* Install the SCI handler */
status = acpi_ev_install_sci_handler();
diff --git a/trunk/drivers/acpi/acpica/evglock.c b/trunk/drivers/acpi/acpica/evglock.c
index 5e5683cb1f0d..56a562a1e5d7 100644
--- a/trunk/drivers/acpi/acpica/evglock.c
+++ b/trunk/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,12 +70,6 @@ acpi_status acpi_ev_init_global_lock_handler(void)
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
- /* If Hardware Reduced flag is set, there is no global lock */
-
- if (acpi_gbl_reduced_hardware) {
- return_ACPI_STATUS(AE_OK);
- }
-
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/trunk/drivers/acpi/acpica/evgpe.c b/trunk/drivers/acpi/acpica/evgpe.c
index 9e88cb6fb25e..65c79add3b19 100644
--- a/trunk/drivers/acpi/acpica/evgpe.c
+++ b/trunk/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeblk.c b/trunk/drivers/acpi/acpica/evgpeblk.c
index be75339cd5dd..ca2c41a53311 100644
--- a/trunk/drivers/acpi/acpica/evgpeblk.c
+++ b/trunk/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeinit.c b/trunk/drivers/acpi/acpica/evgpeinit.c
index adf7494da9db..ce9aa9f9a972 100644
--- a/trunk/drivers/acpi/acpica/evgpeinit.c
+++ b/trunk/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evgpeutil.c b/trunk/drivers/acpi/acpica/evgpeutil.c
index 25073932aa10..80a81d0c4a80 100644
--- a/trunk/drivers/acpi/acpica/evgpeutil.c
+++ b/trunk/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evmisc.c b/trunk/drivers/acpi/acpica/evmisc.c
index 84966f416463..d0b331844427 100644
--- a/trunk/drivers/acpi/acpica/evmisc.c
+++ b/trunk/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evregion.c b/trunk/drivers/acpi/acpica/evregion.c
index 1b0180a1b798..f0edf5c43c03 100644
--- a/trunk/drivers/acpi/acpica/evregion.c
+++ b/trunk/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,7 +329,6 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
- * field_obj - Corresponding field. Can be NULL.
* Function - Read or Write operation
* region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
@@ -345,7 +344,6 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
- union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value)
{
@@ -355,7 +353,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *handler_desc;
union acpi_operand_object *region_obj2;
void *region_context = NULL;
- struct acpi_connection_info *context;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -378,8 +375,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
- context = handler_desc->address_space.context;
-
/*
* It may be the case that the region has never been initialized.
* Some types of regions require special init code
@@ -409,7 +404,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ex_exit_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
- context, &region_context);
+ handler_desc->address_space.context,
+ &region_context);
/* Re-enter the interpreter */
@@ -459,25 +455,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
- /*
- * Special handling for generic_serial_bus and general_purpose_io:
- * There are three extra parameters that must be passed to the
- * handler via the context:
- * 1) Connection buffer, a resource template from Connection() op.
- * 2) Length of the above buffer.
- * 3) Actual access length from the access_as() op.
- */
- if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
- (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
- context && field_obj) {
-
- /* Get the Connection (resource_template) buffer */
-
- context->connection = field_obj->field.resource_buffer;
- context->length = field_obj->field.resource_length;
- context->access_length = field_obj->field.access_length;
- }
-
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
@@ -492,7 +469,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function,
(region_obj->region.address + region_offset),
- bit_width, value, context,
+ bit_width, value, handler_desc->address_space.context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
diff --git a/trunk/drivers/acpi/acpica/evrgnini.c b/trunk/drivers/acpi/acpica/evrgnini.c
index 819c17f5897a..55a5d35ef34a 100644
--- a/trunk/drivers/acpi/acpica/evrgnini.c
+++ b/trunk/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evsci.c b/trunk/drivers/acpi/acpica/evsci.c
index 26065c612e76..2ebd40e1a3ef 100644
--- a/trunk/drivers/acpi/acpica/evsci.c
+++ b/trunk/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxface.c b/trunk/drivers/acpi/acpica/evxface.c
index 61944e89565a..f4f523bf5939 100644
--- a/trunk/drivers/acpi/acpica/evxface.c
+++ b/trunk/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfevnt.c b/trunk/drivers/acpi/acpica/evxfevnt.c
index 1768bbec1002..20516e599476 100644
--- a/trunk/drivers/acpi/acpica/evxfevnt.c
+++ b/trunk/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfgpe.c b/trunk/drivers/acpi/acpica/evxfgpe.c
index 33388fd69df4..f06a3ee356ba 100644
--- a/trunk/drivers/acpi/acpica/evxfgpe.c
+++ b/trunk/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/evxfregn.c b/trunk/drivers/acpi/acpica/evxfregn.c
index 6019208cd4b6..aee887e3ca5c 100644
--- a/trunk/drivers/acpi/acpica/evxfregn.c
+++ b/trunk/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exconfig.c b/trunk/drivers/acpi/acpica/exconfig.c
index c86d44e41bc8..745a42b401f5 100644
--- a/trunk/drivers/acpi/acpica/exconfig.c
+++ b/trunk/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
/* Bytewise reads */
for (i = 0; i < length; i++) {
- status =
- acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
- region_offset, 8, &value);
+ status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
+ region_offset, 8,
+ &value);
if (ACPI_FAILURE(status)) {
return status;
}
diff --git a/trunk/drivers/acpi/acpica/exconvrt.c b/trunk/drivers/acpi/acpica/exconvrt.c
index e385436bd424..74162a11817d 100644
--- a/trunk/drivers/acpi/acpica/exconvrt.c
+++ b/trunk/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/excreate.c b/trunk/drivers/acpi/acpica/excreate.c
index 3f5bc998c1cb..110711afada8 100644
--- a/trunk/drivers/acpi/acpica/excreate.c
+++ b/trunk/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
*
* PARAMETERS: aml_start - Pointer to the region declaration AML
* aml_length - Max length of the declaration AML
- * space_id - Address space ID for the region
+ * region_space - space_iD for the region
* walk_state - Current state
*
* RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status
acpi_ex_create_region(u8 * aml_start,
u32 aml_length,
- u8 space_id, struct acpi_walk_state *walk_state)
+ u8 region_space, struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *obj_desc;
@@ -304,19 +304,16 @@ acpi_ex_create_region(u8 * aml_start,
* Space ID must be one of the predefined IDs, or in the user-defined
* range
*/
- if (!acpi_is_valid_space_id(space_id)) {
- /*
- * Print an error message, but continue. We don't want to abort
- * a table load for this exception. Instead, if the region is
- * actually used at runtime, abort the executing method.
- */
- ACPI_ERROR((AE_INFO,
- "Invalid/unknown Address Space ID: 0x%2.2X",
- space_id));
+ if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
+ (region_space < ACPI_USER_REGION_BEGIN) &&
+ (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
+ region_space));
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
- acpi_ut_get_region_name(space_id), space_id));
+ acpi_ut_get_region_name(region_space), region_space));
/* Create the region descriptor */
@@ -333,16 +330,10 @@ acpi_ex_create_region(u8 * aml_start,
region_obj2 = obj_desc->common.next_object;
region_obj2->extra.aml_start = aml_start;
region_obj2->extra.aml_length = aml_length;
- if (walk_state->scope_info) {
- region_obj2->extra.scope_node =
- walk_state->scope_info->scope.node;
- } else {
- region_obj2->extra.scope_node = node;
- }
/* Init the region from the operands */
- obj_desc->region.space_id = space_id;
+ obj_desc->region.space_id = region_space;
obj_desc->region.address = 0;
obj_desc->region.length = 0;
obj_desc->region.node = node;
diff --git a/trunk/drivers/acpi/acpica/exdebug.c b/trunk/drivers/acpi/acpica/exdebug.c
index e211e9c19215..c7a2f1edd282 100644
--- a/trunk/drivers/acpi/acpica/exdebug.c
+++ b/trunk/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exdump.c b/trunk/drivers/acpi/acpica/exdump.c
index 2a6ac0a3bc1e..61b8c0e8b74d 100644
--- a/trunk/drivers/acpi/acpica/exdump.c
+++ b/trunk/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,13 +192,10 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
"Buffer Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
{ACPI_EXD_FIELD, 0, NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
- "ResourceBuffer"}
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
};
static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/trunk/drivers/acpi/acpica/exfield.c b/trunk/drivers/acpi/acpica/exfield.c
index dc092f5b35d6..0bde2230c028 100644
--- a/trunk/drivers/acpi/acpica/exfield.c
+++ b/trunk/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,25 +100,18 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS
- || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
+ * This is an SMBus or IPMI read. We must create a buffer to hold
* the data and then directly access the region handler.
*
- * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
+ * Note: Smbus protocol value is passed in upper 16-bits of Function
*/
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_READ | (obj_desc->field.attribute << 16);
- } else if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS) {
- length = ACPI_GSBUS_BUFFER_SIZE;
- function =
- ACPI_READ | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -255,23 +248,21 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS
- || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
+ * This is an SMBus or IPMI write. We will bypass the entire field
* mechanism and handoff the buffer directly to the handler. For
* these address spaces, the buffer is bi-directional; on a write,
* return data is returned in the same buffer.
*
* Source must be a buffer of sufficient size:
- * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
+ * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
*
- * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
+ * Note: SMBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
- "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
+ "SMBus or IPMI write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -282,11 +273,6 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_WRITE | (obj_desc->field.attribute << 16);
- } else if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS) {
- length = ACPI_GSBUS_BUFFER_SIZE;
- function =
- ACPI_WRITE | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -295,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
+ "SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/trunk/drivers/acpi/acpica/exfldio.c b/trunk/drivers/acpi/acpica/exfldio.c
index 149de45fdadd..f915a7f3f921 100644
--- a/trunk/drivers/acpi/acpica/exfldio.c
+++ b/trunk/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,7 +86,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
- u8 space_id;
ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
@@ -102,17 +101,6 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
- space_id = rgn_desc->region.space_id;
-
- /* Validate the Space ID */
-
- if (!acpi_is_valid_space_id(space_id)) {
- ACPI_ERROR((AE_INFO,
- "Invalid/unknown Address Space ID: 0x%2.2X",
- space_id));
- return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
- }
-
/*
* If the Region Address and Length have not been previously evaluated,
* evaluate them now and save the results.
@@ -131,12 +119,11 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
+ * Exit now for SMBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
*/
- if (space_id == ACPI_ADR_SPACE_SMBUS ||
- space_id == ACPI_ADR_SPACE_GSBUS ||
- space_id == ACPI_ADR_SPACE_IPMI) {
+ if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
+ rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
/* SMBus or IPMI has a non-linear address space */
@@ -284,12 +271,11 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
/* Invoke the appropriate address_space/op_region handler */
- status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
- function, region_offset,
- ACPI_MUL_8(obj_desc->
- common_field.
- access_byte_width),
- value);
+ status =
+ acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
+ ACPI_MUL_8(obj_desc->common_field.
+ access_byte_width),
+ value);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
@@ -330,7 +316,6 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
- ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
@@ -345,11 +330,6 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
* The Value is larger than the maximum value that can fit into
* the register.
*/
- ACPI_ERROR((AE_INFO,
- "Index value 0x%8.8X%8.8X overflows field width 0x%X",
- ACPI_FORMAT_UINT64(value),
- obj_desc->common_field.bit_length));
-
return (TRUE);
}
diff --git a/trunk/drivers/acpi/acpica/exmisc.c b/trunk/drivers/acpi/acpica/exmisc.c
index 0a0893310348..703d88ed0b3d 100644
--- a/trunk/drivers/acpi/acpica/exmisc.c
+++ b/trunk/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exmutex.c b/trunk/drivers/acpi/acpica/exmutex.c
index 60933e9dc3c0..be1c56ead653 100644
--- a/trunk/drivers/acpi/acpica/exmutex.c
+++ b/trunk/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exnames.c b/trunk/drivers/acpi/acpica/exnames.c
index fcc75fa27d32..49ec049c157e 100644
--- a/trunk/drivers/acpi/acpica/exnames.c
+++ b/trunk/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg1.c b/trunk/drivers/acpi/acpica/exoparg1.c
index 9ba8c73cea16..236ead14b7f7 100644
--- a/trunk/drivers/acpi/acpica/exoparg1.c
+++ b/trunk/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg2.c b/trunk/drivers/acpi/acpica/exoparg2.c
index 879e8a277b94..2571b4a310f4 100644
--- a/trunk/drivers/acpi/acpica/exoparg2.c
+++ b/trunk/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg3.c b/trunk/drivers/acpi/acpica/exoparg3.c
index 71fcc65c9ffa..1b48d9d28c9a 100644
--- a/trunk/drivers/acpi/acpica/exoparg3.c
+++ b/trunk/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exoparg6.c b/trunk/drivers/acpi/acpica/exoparg6.c
index 0786b8659061..f4a2787e8e92 100644
--- a/trunk/drivers/acpi/acpica/exoparg6.c
+++ b/trunk/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exprep.c b/trunk/drivers/acpi/acpica/exprep.c
index 30157f5a12d7..cc95e2000406 100644
--- a/trunk/drivers/acpi/acpica/exprep.c
+++ b/trunk/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
-#include "acdispat.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exprep")
@@ -456,30 +455,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
- /* Fields specific to generic_serial_bus fields */
-
- obj_desc->field.access_length = info->access_length;
-
- if (info->connection_node) {
- second_desc = info->connection_node->object;
- if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
- status =
- acpi_ds_get_buffer_arguments(second_desc);
- if (ACPI_FAILURE(status)) {
- acpi_ut_delete_object_desc(obj_desc);
- return_ACPI_STATUS(status);
- }
- }
-
- obj_desc->field.resource_buffer =
- second_desc->buffer.pointer;
- obj_desc->field.resource_length =
- (u16)second_desc->buffer.length;
- } else if (info->resource_buffer) {
- obj_desc->field.resource_buffer = info->resource_buffer;
- obj_desc->field.resource_length = info->resource_length;
- }
-
/* Allow full data read from EC address space */
if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/trunk/drivers/acpi/acpica/exregion.c b/trunk/drivers/acpi/acpica/exregion.c
index 12d51df6d3bf..f0d5e14f1f2c 100644
--- a/trunk/drivers/acpi/acpica/exregion.c
+++ b/trunk/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresnte.c b/trunk/drivers/acpi/acpica/exresnte.c
index fa50e77e64a8..55997e46948b 100644
--- a/trunk/drivers/acpi/acpica/exresnte.c
+++ b/trunk/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresolv.c b/trunk/drivers/acpi/acpica/exresolv.c
index 6e335dc34528..db502cd7d934 100644
--- a/trunk/drivers/acpi/acpica/exresolv.c
+++ b/trunk/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exresop.c b/trunk/drivers/acpi/acpica/exresop.c
index a67b1d925ddd..e3bb00ccdff5 100644
--- a/trunk/drivers/acpi/acpica/exresop.c
+++ b/trunk/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstore.c b/trunk/drivers/acpi/acpica/exstore.c
index c6cf843cc4c9..c0c8842dd344 100644
--- a/trunk/drivers/acpi/acpica/exstore.c
+++ b/trunk/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstoren.c b/trunk/drivers/acpi/acpica/exstoren.c
index b35bed52e061..a979017d56b8 100644
--- a/trunk/drivers/acpi/acpica/exstoren.c
+++ b/trunk/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exstorob.c b/trunk/drivers/acpi/acpica/exstorob.c
index 65a45d8335c8..dc665cc554de 100644
--- a/trunk/drivers/acpi/acpica/exstorob.c
+++ b/trunk/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exsystem.c b/trunk/drivers/acpi/acpica/exsystem.c
index 191a12945226..df66e7b686be 100644
--- a/trunk/drivers/acpi/acpica/exsystem.c
+++ b/trunk/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/exutils.c b/trunk/drivers/acpi/acpica/exutils.c
index eb6798ba8b59..8ad93146dd32 100644
--- a/trunk/drivers/acpi/acpica/exutils.c
+++ b/trunk/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -435,29 +435,4 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
}
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_is_valid_space_id
- *
- * PARAMETERS: space_id - ID to be validated
- *
- * RETURN: TRUE if valid/supported ID.
- *
- * DESCRIPTION: Validate an operation region space_iD.
- *
- ******************************************************************************/
-
-u8 acpi_is_valid_space_id(u8 space_id)
-{
-
- if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (space_id < ACPI_USER_REGION_BEGIN) &&
- (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
- (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
- return (FALSE);
- }
-
- return (TRUE);
-}
-
#endif
diff --git a/trunk/drivers/acpi/acpica/hwacpi.c b/trunk/drivers/acpi/acpica/hwacpi.c
index d21ec5f0b3a9..fc380d3d45ab 100644
--- a/trunk/drivers/acpi/acpica/hwacpi.c
+++ b/trunk/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwgpe.c b/trunk/drivers/acpi/acpica/hwgpe.c
index 1a6894afef79..f610d88a66be 100644
--- a/trunk/drivers/acpi/acpica/hwgpe.c
+++ b/trunk/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwpci.c b/trunk/drivers/acpi/acpica/hwpci.c
index 1455ddcdc32c..050fd227951b 100644
--- a/trunk/drivers/acpi/acpica/hwpci.c
+++ b/trunk/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwregs.c b/trunk/drivers/acpi/acpica/hwregs.c
index 4ea4eeb51bfd..cc70f3fdcdd1 100644
--- a/trunk/drivers/acpi/acpica/hwregs.c
+++ b/trunk/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwsleep.c b/trunk/drivers/acpi/acpica/hwsleep.c
index 3c4a922a9fc2..d52da3073650 100644
--- a/trunk/drivers/acpi/acpica/hwsleep.c
+++ b/trunk/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwtimer.c b/trunk/drivers/acpi/acpica/hwtimer.c
index d4973d9da9f1..50d21c40b5c1 100644
--- a/trunk/drivers/acpi/acpica/hwtimer.c
+++ b/trunk/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/hwvalid.c b/trunk/drivers/acpi/acpica/hwvalid.c
index 6e5c43a60bb7..5f1605874655 100644
--- a/trunk/drivers/acpi/acpica/hwvalid.c
+++ b/trunk/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,8 +134,6 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
- ACPI_ERROR((AE_INFO,
- "Bad BitWidth parameter: %8.8X", bit_width));
return AE_BAD_PARAMETER;
}
diff --git a/trunk/drivers/acpi/acpica/hwxface.c b/trunk/drivers/acpi/acpica/hwxface.c
index 9d38eb6c0d0b..d707756228c2 100644
--- a/trunk/drivers/acpi/acpica/hwxface.c
+++ b/trunk/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsaccess.c b/trunk/drivers/acpi/acpica/nsaccess.c
index 61623f3f6826..d93172fd15a8 100644
--- a/trunk/drivers/acpi/acpica/nsaccess.c
+++ b/trunk/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsalloc.c b/trunk/drivers/acpi/acpica/nsalloc.c
index 7c3d3ceb98b3..1d0ef15d158f 100644
--- a/trunk/drivers/acpi/acpica/nsalloc.c
+++ b/trunk/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsdump.c b/trunk/drivers/acpi/acpica/nsdump.c
index b7f2b3be79ac..b683cc2ff9d3 100644
--- a/trunk/drivers/acpi/acpica/nsdump.c
+++ b/trunk/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsdumpdv.c b/trunk/drivers/acpi/acpica/nsdumpdv.c
index 30ea5bc53a78..2ed294b7a4db 100644
--- a/trunk/drivers/acpi/acpica/nsdumpdv.c
+++ b/trunk/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nseval.c b/trunk/drivers/acpi/acpica/nseval.c
index f375cb82e321..c1bd02b1a058 100644
--- a/trunk/drivers/acpi/acpica/nseval.c
+++ b/trunk/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsinit.c b/trunk/drivers/acpi/acpica/nsinit.c
index 9d84ec2f0211..fd7c6380e294 100644
--- a/trunk/drivers/acpi/acpica/nsinit.c
+++ b/trunk/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsload.c b/trunk/drivers/acpi/acpica/nsload.c
index 5cbf15ffe7d8..5f7dc691c183 100644
--- a/trunk/drivers/acpi/acpica/nsload.c
+++ b/trunk/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsnames.c b/trunk/drivers/acpi/acpica/nsnames.c
index b20e7c8c3ffb..d5fa520c3de5 100644
--- a/trunk/drivers/acpi/acpica/nsnames.c
+++ b/trunk/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsobject.c b/trunk/drivers/acpi/acpica/nsobject.c
index dd77a3ce6e50..3bb8bf105ea2 100644
--- a/trunk/drivers/acpi/acpica/nsobject.c
+++ b/trunk/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsparse.c b/trunk/drivers/acpi/acpica/nsparse.c
index ec7ba2d3463c..b3234fa795b8 100644
--- a/trunk/drivers/acpi/acpica/nsparse.c
+++ b/trunk/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nspredef.c b/trunk/drivers/acpi/acpica/nspredef.c
index bbe46a447d34..c845c8089f39 100644
--- a/trunk/drivers/acpi/acpica/nspredef.c
+++ b/trunk/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -620,7 +620,6 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_COUNT:
- case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
@@ -760,34 +759,6 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
}
break;
- case ACPI_PTYPE2_FIX_VAR:
- /*
- * Each subpackage has a fixed number of elements and an
- * optional element
- */
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- sub_package->package.
- count -
- package->ret_info.
- count1, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
case ACPI_PTYPE2_FIXED:
/* Each sub-package has a fixed length */
diff --git a/trunk/drivers/acpi/acpica/nsrepair.c b/trunk/drivers/acpi/acpica/nsrepair.c
index 9c35d20eb52b..ac7b854b0bd7 100644
--- a/trunk/drivers/acpi/acpica/nsrepair.c
+++ b/trunk/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -634,7 +634,6 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_REV_FIXED:
- case ACPI_PTYPE2_FIX_VAR:
break;
default:
diff --git a/trunk/drivers/acpi/acpica/nsrepair2.c b/trunk/drivers/acpi/acpica/nsrepair2.c
index 726bc8e687f7..024c4f263f87 100644
--- a/trunk/drivers/acpi/acpica/nsrepair2.c
+++ b/trunk/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -467,12 +467,11 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
}
/*
- * Copy and uppercase the string. From the ACPI 5.0 specification:
+ * Copy and uppercase the string. From the ACPI specification:
*
* A valid PNP ID must be of the form "AAA####" where A is an uppercase
* letter and # is a hex digit. A valid ACPI ID must be of the form
- * "NNNN####" where N is an uppercase letter or decimal digit, and
- * # is a hex digit.
+ * "ACPI####" where # is a hex digit.
*/
for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)ACPI_TOUPPER(*source);
diff --git a/trunk/drivers/acpi/acpica/nssearch.c b/trunk/drivers/acpi/acpica/nssearch.c
index 507043d66114..28b0d7a62b99 100644
--- a/trunk/drivers/acpi/acpica/nssearch.c
+++ b/trunk/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsutils.c b/trunk/drivers/acpi/acpica/nsutils.c
index a535b7afda5c..cb1b104a69a2 100644
--- a/trunk/drivers/acpi/acpica/nsutils.c
+++ b/trunk/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nswalk.c b/trunk/drivers/acpi/acpica/nswalk.c
index f69895a54895..345f0c3c6ad2 100644
--- a/trunk/drivers/acpi/acpica/nswalk.c
+++ b/trunk/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfeval.c b/trunk/drivers/acpi/acpica/nsxfeval.c
index 71d15f61807b..e7f016d1b226 100644
--- a/trunk/drivers/acpi/acpica/nsxfeval.c
+++ b/trunk/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfname.c b/trunk/drivers/acpi/acpica/nsxfname.c
index af401c9c4dfc..83bf93024303 100644
--- a/trunk/drivers/acpi/acpica/nsxfname.c
+++ b/trunk/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/nsxfobj.c b/trunk/drivers/acpi/acpica/nsxfobj.c
index 880a605cee20..57e6d825ed84 100644
--- a/trunk/drivers/acpi/acpica/nsxfobj.c
+++ b/trunk/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psargs.c b/trunk/drivers/acpi/acpica/psargs.c
index 5ac36aba507c..e1fad0ee0136 100644
--- a/trunk/drivers/acpi/acpica/psargs.c
+++ b/trunk/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -484,54 +484,34 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state)
{
- u32 aml_offset;
+ u32 aml_offset = (u32)
+ ACPI_PTR_DIFF(parser_state->aml,
+ parser_state->aml_start);
union acpi_parse_object *field;
- union acpi_parse_object *arg = NULL;
u16 opcode;
u32 name;
- u8 access_type;
- u8 access_attribute;
- u8 access_length;
- u32 pkg_length;
- u8 *pkg_end;
- u32 buffer_length;
ACPI_FUNCTION_TRACE(ps_get_next_field);
- aml_offset =
- (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
-
/* Determine field type */
switch (ACPI_GET8(parser_state->aml)) {
- case AML_FIELD_OFFSET_OP:
-
- opcode = AML_INT_RESERVEDFIELD_OP;
- parser_state->aml++;
- break;
-
- case AML_FIELD_ACCESS_OP:
+ default:
- opcode = AML_INT_ACCESSFIELD_OP;
- parser_state->aml++;
+ opcode = AML_INT_NAMEDFIELD_OP;
break;
- case AML_FIELD_CONNECTION_OP:
+ case 0x00:
- opcode = AML_INT_CONNECTION_OP;
+ opcode = AML_INT_RESERVEDFIELD_OP;
parser_state->aml++;
break;
- case AML_FIELD_EXT_ACCESS_OP:
+ case 0x01:
- opcode = AML_INT_EXTACCESSFIELD_OP;
+ opcode = AML_INT_ACCESSFIELD_OP;
parser_state->aml++;
break;
-
- default:
-
- opcode = AML_INT_NAMEDFIELD_OP;
- break;
}
/* Allocate a new field op */
@@ -569,111 +549,16 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
break;
case AML_INT_ACCESSFIELD_OP:
- case AML_INT_EXTACCESSFIELD_OP:
/*
* Get access_type and access_attrib and merge into the field Op
- * access_type is first operand, access_attribute is second. stuff
- * these bytes into the node integer value for convenience.
+ * access_type is first operand, access_attribute is second
*/
-
- /* Get the two bytes (Type/Attribute) */
-
- access_type = ACPI_GET8(parser_state->aml);
+ field->common.value.integer =
+ (((u32) ACPI_GET8(parser_state->aml) << 8));
parser_state->aml++;
- access_attribute = ACPI_GET8(parser_state->aml);
+ field->common.value.integer |= ACPI_GET8(parser_state->aml);
parser_state->aml++;
-
- field->common.value.integer = (u8)access_type;
- field->common.value.integer |= (u16)(access_attribute << 8);
-
- /* This opcode has a third byte, access_length */
-
- if (opcode == AML_INT_EXTACCESSFIELD_OP) {
- access_length = ACPI_GET8(parser_state->aml);
- parser_state->aml++;
-
- field->common.value.integer |=
- (u32)(access_length << 16);
- }
- break;
-
- case AML_INT_CONNECTION_OP:
-
- /*
- * Argument for Connection operator can be either a Buffer
- * (resource descriptor), or a name_string.
- */
- if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
- parser_state->aml++;
-
- pkg_end = parser_state->aml;
- pkg_length =
- acpi_ps_get_next_package_length(parser_state);
- pkg_end += pkg_length;
-
- if (parser_state->aml < pkg_end) {
-
- /* Non-empty list */
-
- arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
- if (!arg) {
- return_PTR(NULL);
- }
-
- /* Get the actual buffer length argument */
-
- opcode = ACPI_GET8(parser_state->aml);
- parser_state->aml++;
-
- switch (opcode) {
- case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
- buffer_length =
- ACPI_GET8(parser_state->aml);
- parser_state->aml += 1;
- break;
-
- case AML_WORD_OP: /* AML_WORDDATA_ARG */
- buffer_length =
- ACPI_GET16(parser_state->aml);
- parser_state->aml += 2;
- break;
-
- case AML_DWORD_OP: /* AML_DWORDATA_ARG */
- buffer_length =
- ACPI_GET32(parser_state->aml);
- parser_state->aml += 4;
- break;
-
- default:
- buffer_length = 0;
- break;
- }
-
- /* Fill in bytelist data */
-
- arg->named.value.size = buffer_length;
- arg->named.data = parser_state->aml;
- }
-
- /* Skip to End of byte data */
-
- parser_state->aml = pkg_end;
- } else {
- arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
- if (!arg) {
- return_PTR(NULL);
- }
-
- /* Get the Namestring argument */
-
- arg->common.value.name =
- acpi_ps_get_next_namestring(parser_state);
- }
-
- /* Link the buffer/namestring to parent (CONNECTION_OP) */
-
- acpi_ps_append_arg(field, arg);
break;
default:
diff --git a/trunk/drivers/acpi/acpica/psloop.c b/trunk/drivers/acpi/acpica/psloop.c
index 9547ad8a620b..01dd70d1de51 100644
--- a/trunk/drivers/acpi/acpica/psloop.c
+++ b/trunk/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psopcode.c b/trunk/drivers/acpi/acpica/psopcode.c
index a0226fdcf75c..bed08de7528c 100644
--- a/trunk/drivers/acpi/acpica/psopcode.c
+++ b/trunk/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -638,16 +638,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R),
-
-/* ACPI 5.0 opcodes */
-
-/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
- ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
-/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
- ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
- AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
+ AML_FLAGS_EXEC_0A_0T_1R)
/*! [End] no source code translation !*/
};
@@ -666,7 +657,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
diff --git a/trunk/drivers/acpi/acpica/psparse.c b/trunk/drivers/acpi/acpica/psparse.c
index 2ff9c35a1968..9bb0cbd37b5e 100644
--- a/trunk/drivers/acpi/acpica/psparse.c
+++ b/trunk/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psscope.c b/trunk/drivers/acpi/acpica/psscope.c
index c872aa4b926e..a5faa1323a02 100644
--- a/trunk/drivers/acpi/acpica/psscope.c
+++ b/trunk/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/pstree.c b/trunk/drivers/acpi/acpica/pstree.c
index 2b03cdbbe1c0..f1464c03aa42 100644
--- a/trunk/drivers/acpi/acpica/pstree.c
+++ b/trunk/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,12 +74,6 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
ACPI_FUNCTION_ENTRY();
-/*
- if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
- {
- return (Op->Common.Value.Arg);
- }
-*/
/* Get the info structure for this opcode */
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/trunk/drivers/acpi/acpica/psutils.c b/trunk/drivers/acpi/acpica/psutils.c
index 13bb131ae125..7eda78503422 100644
--- a/trunk/drivers/acpi/acpica/psutils.c
+++ b/trunk/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/pswalk.c b/trunk/drivers/acpi/acpica/pswalk.c
index ab96cf47896d..3312d6368bf1 100644
--- a/trunk/drivers/acpi/acpica/pswalk.c
+++ b/trunk/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/psxface.c b/trunk/drivers/acpi/acpica/psxface.c
index 9d98c5ff66a5..8086805d4494 100644
--- a/trunk/drivers/acpi/acpica/psxface.c
+++ b/trunk/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsaddr.c b/trunk/drivers/acpi/acpica/rsaddr.c
index a0305652394f..9e66f9078426 100644
--- a/trunk/drivers/acpi/acpica/rsaddr.c
+++ b/trunk/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rscalc.c b/trunk/drivers/acpi/acpica/rscalc.c
index 3c6df4b7eb2d..3a8a89ec2ca4 100644
--- a/trunk/drivers/acpi/acpica/rscalc.c
+++ b/trunk/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -313,38 +313,6 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
resource_source));
break;
- case ACPI_RESOURCE_TYPE_GPIO:
-
- total_size =
- (acpi_rs_length) (total_size +
- (resource->data.gpio.
- pin_table_length * 2) +
- resource->data.gpio.
- resource_source.string_length +
- resource->data.gpio.
- vendor_length);
-
- break;
-
- case ACPI_RESOURCE_TYPE_SERIAL_BUS:
-
- total_size =
- acpi_gbl_aml_resource_serial_bus_sizes[resource->
- data.
- common_serial_bus.
- type];
-
- total_size = (acpi_rs_length) (total_size +
- resource->data.
- i2c_serial_bus.
- resource_source.
- string_length +
- resource->data.
- i2c_serial_bus.
- vendor_length);
-
- break;
-
default:
break;
}
@@ -394,11 +362,10 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
- union aml_resource *aml_resource;
ACPI_FUNCTION_TRACE(rs_get_list_length);
- *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
+ *size_needed = 0;
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
@@ -409,15 +376,9 @@ acpi_rs_get_list_length(u8 * aml_buffer,
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
if (ACPI_FAILURE(status)) {
- /*
- * Exit on failure. Cannot continue because the descriptor length
- * may be bogus also.
- */
return_ACPI_STATUS(status);
}
- aml_resource = (void *)aml_buffer;
-
/* Get the resource length and base (minimum) AML size */
resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -461,8 +422,10 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag: This is the normal exit
+ * End Tag:
+ * This is the normal exit, add size of end_tag
*/
+ *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -494,33 +457,6 @@ acpi_rs_get_list_length(u8 * aml_buffer,
minimum_aml_resource_length);
break;
- case ACPI_RESOURCE_NAME_GPIO:
-
- /* Vendor data is optional */
-
- if (aml_resource->gpio.vendor_length) {
- extra_struct_bytes +=
- aml_resource->gpio.vendor_offset -
- aml_resource->gpio.pin_table_offset +
- aml_resource->gpio.vendor_length;
- } else {
- extra_struct_bytes +=
- aml_resource->large_header.resource_length +
- sizeof(struct aml_resource_large_header) -
- aml_resource->gpio.pin_table_offset;
- }
- break;
-
- case ACPI_RESOURCE_NAME_SERIAL_BUS:
-
- minimum_aml_resource_length =
- acpi_gbl_resource_aml_serial_bus_sizes
- [aml_resource->common_serial_bus.type];
- extra_struct_bytes +=
- aml_resource->common_serial_bus.resource_length -
- minimum_aml_resource_length;
- break;
-
default:
break;
}
@@ -531,18 +467,9 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Important: Round the size up for the appropriate alignment. This
* is a requirement on IA64.
*/
- if (acpi_ut_get_resource_type(aml_buffer) ==
- ACPI_RESOURCE_NAME_SERIAL_BUS) {
- buffer_size =
- acpi_gbl_resource_struct_serial_bus_sizes
- [aml_resource->common_serial_bus.type] +
- extra_struct_bytes;
- } else {
- buffer_size =
- acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes;
- }
- buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+ buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
*size_needed += buffer_size;
diff --git a/trunk/drivers/acpi/acpica/rscreate.c b/trunk/drivers/acpi/acpica/rscreate.c
index 46d6eb38ae66..4ce6e1147e80 100644
--- a/trunk/drivers/acpi/acpica/rscreate.c
+++ b/trunk/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,70 +49,6 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rscreate")
-/*******************************************************************************
- *
- * FUNCTION: acpi_buffer_to_resource
- *
- * PARAMETERS: aml_buffer - Pointer to the resource byte stream
- * aml_buffer_length - Length of the aml_buffer
- * resource_ptr - Where the converted resource is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Convert a raw AML buffer to a resource list
- *
- ******************************************************************************/
-acpi_status
-acpi_buffer_to_resource(u8 *aml_buffer,
- u16 aml_buffer_length,
- struct acpi_resource **resource_ptr)
-{
- acpi_status status;
- acpi_size list_size_needed;
- void *resource;
- void *current_resource_ptr;
-
- /*
- * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
- * is not required here.
- */
-
- /* Get the required length for the converted resource */
-
- status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
- &list_size_needed);
- if (status == AE_AML_NO_RESOURCE_END_TAG) {
- status = AE_OK;
- }
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Allocate a buffer for the converted resource */
-
- resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
- current_resource_ptr = resource;
- if (!resource) {
- return (AE_NO_MEMORY);
- }
-
- /* Perform the AML-to-Resource conversion */
-
- status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
- acpi_rs_convert_aml_to_resources,
- &current_resource_ptr);
- if (status == AE_AML_NO_RESOURCE_END_TAG) {
- status = AE_OK;
- }
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(resource);
- } else {
- *resource_ptr = resource;
- }
-
- return (status);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_rs_create_resource_list
@@ -130,10 +66,9 @@ acpi_buffer_to_resource(u8 *aml_buffer,
* of device resources.
*
******************************************************************************/
-
acpi_status
acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
- struct acpi_buffer * output_buffer)
+ struct acpi_buffer *output_buffer)
{
acpi_status status;
diff --git a/trunk/drivers/acpi/acpica/rsdump.c b/trunk/drivers/acpi/acpica/rsdump.c
index b4c581132393..33db7520c74b 100644
--- a/trunk/drivers/acpi/acpica/rsdump.c
+++ b/trunk/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,13 +61,11 @@ static void acpi_rs_out_integer64(char *title, u64 value);
static void acpi_rs_out_title(char *title);
-static void acpi_rs_dump_byte_list(u16 length, u8 *data);
+static void acpi_rs_dump_byte_list(u16 length, u8 * data);
-static void acpi_rs_dump_word_list(u16 length, u16 *data);
+static void acpi_rs_dump_dword_list(u8 length, u32 * data);
-static void acpi_rs_dump_dword_list(u8 length, u32 *data);
-
-static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
+static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -311,125 +309,6 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
};
-struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
- "ConnectionType", acpi_gbl_ct_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
- "ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
- acpi_gbl_ppc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
- acpi_gbl_shr_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
- "IoRestriction", acpi_gbl_ior_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
- acpi_gbl_he_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
- "DebounceTimeout", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
- "ResourceSource", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
- "PinTableLength", NULL},
- {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
- NULL},
- {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
- NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
- "FixedDma", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
- "RequestLines", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
- acpi_gbl_dts_decode},
-};
-
-#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
- {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
-
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
- "Common Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS
-};
-
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
- "I2C Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(i2c_serial_bus.
- access_mode),
- "AccessMode", acpi_gbl_am_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
- "SlaveAddress", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
- "Spi Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(spi_serial_bus.
- wire_mode), "WireMode",
- acpi_gbl_wm_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
- "DevicePolarity", acpi_gbl_dp_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
- "DataBitLength", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
- "ClockPhase", acpi_gbl_cph_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
- "ClockPolarity", acpi_gbl_cpo_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
- "DeviceSelection", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
- "Uart Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
- ACPI_RSD_OFFSET(uart_serial_bus.
- flow_control),
- "FlowControl", acpi_gbl_fc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
- "StopBits", acpi_gbl_sb_decode},
- {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
- "DataBits", acpi_gbl_bpb_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
- acpi_gbl_ed_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
- acpi_gbl_pt_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
- "LinesEnabled", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
- "RxFifoSize", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
- "TxFifoSize", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
- "ConnectionSpeed", NULL},
-};
-
/*
* Tables used for common address descriptor flag fields
*/
@@ -534,14 +413,7 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/* Data items, 8/16/32/64 bit */
case ACPI_RSD_UINT8:
- if (table->pointer) {
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer
- [*target]));
- } else {
- acpi_rs_out_integer8(name, ACPI_GET8(target));
- }
+ acpi_rs_out_integer8(name, ACPI_GET8(target));
break;
case ACPI_RSD_UINT16:
@@ -572,13 +444,6 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
0x03]));
break;
- case ACPI_RSD_3BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x07]));
- break;
-
case ACPI_RSD_SHORTLIST:
/*
* Short byte list (single line output) for DMA and IRQ resources
@@ -591,20 +456,6 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
- case ACPI_RSD_SHORTLISTX:
- /*
- * Short byte list (single line output) for GPIO vendor data
- * Note: The list length is obtained from the previous table entry
- */
- if (previous_target) {
- acpi_rs_out_title(name);
- acpi_rs_dump_short_byte_list(*previous_target,
- *
- (ACPI_CAST_INDIRECT_PTR
- (u8, target)));
- }
- break;
-
case ACPI_RSD_LONGLIST:
/*
* Long byte list for Vendor resource data
@@ -629,18 +480,6 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
- case ACPI_RSD_WORDLIST:
- /*
- * Word list for GPIO Pin Table
- * Note: The list length is obtained from the previous table entry
- */
- if (previous_target) {
- acpi_rs_dump_word_list(*previous_target,
- *(ACPI_CAST_INDIRECT_PTR
- (u16, target)));
- }
- break;
-
case ACPI_RSD_ADDRESS:
/*
* Common flags for all Address resources
@@ -788,20 +627,14 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
/* Dump the resource descriptor */
- if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_serial_bus_dispatch
- [resource_list->data.
- common_serial_bus.type]);
- } else {
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch
- [type]);
- }
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch[type]);
/* Point to the next resource structure */
- resource_list = ACPI_NEXT_RESOURCE(resource_list);
+ resource_list =
+ ACPI_ADD_PTR(struct acpi_resource, resource_list,
+ resource_list->length);
/* Exit when END_TAG descriptor is reached */
@@ -935,13 +768,4 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
}
}
-static void acpi_rs_dump_word_list(u16 length, u16 *data)
-{
- u16 i;
-
- for (i = 0; i < length; i++) {
- acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
- }
-}
-
#endif
diff --git a/trunk/drivers/acpi/acpica/rsinfo.c b/trunk/drivers/acpi/acpica/rsinfo.c
index a9fa5158200b..f9ea60872aa4 100644
--- a/trunk/drivers/acpi/acpica/rsinfo.c
+++ b/trunk/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,10 +76,7 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
- acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
- acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
- NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
+ acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
};
/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -97,7 +94,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
- acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
+ NULL, /* 0x0A, Reserved */
NULL, /* 0x0B, Reserved */
NULL, /* 0x0C, Reserved */
NULL, /* 0x0D, Reserved */
@@ -117,19 +114,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
- acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
- acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
- NULL, /* 0x0D, Reserved */
- NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
-};
-
-/* Subtype table for serial_bus -- I2C, SPI, and UART */
-
-struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
- NULL,
- acpi_rs_convert_i2c_serial_bus,
- acpi_rs_convert_spi_serial_bus,
- acpi_rs_convert_uart_serial_bus,
+ acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
};
#ifdef ACPI_FUTURE_USAGE
@@ -155,16 +140,6 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
- acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
- acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
- NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
-};
-
-struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
- NULL,
- acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
- acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
- acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
};
#endif
@@ -191,10 +166,7 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
- sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
- sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
- sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+ sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
};
const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -210,7 +182,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE_MIN,
ACPI_RS_SIZE(struct acpi_resource_io),
ACPI_RS_SIZE(struct acpi_resource_fixed_io),
- ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+ 0,
0,
0,
0,
@@ -230,21 +202,5 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE(struct acpi_resource_address16),
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RS_SIZE(struct acpi_resource_address64),
- ACPI_RS_SIZE(struct acpi_resource_extended_address64),
- ACPI_RS_SIZE(struct acpi_resource_gpio),
- ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
-};
-
-const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
- 0,
- sizeof(struct aml_resource_i2c_serialbus),
- sizeof(struct aml_resource_spi_serialbus),
- sizeof(struct aml_resource_uart_serialbus),
-};
-
-const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
- 0,
- ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
- ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
- ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64)
};
diff --git a/trunk/drivers/acpi/acpica/rsio.c b/trunk/drivers/acpi/acpica/rsio.c
index f6a081057a22..0c7efef008be 100644
--- a/trunk/drivers/acpi/acpica/rsio.c
+++ b/trunk/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsirq.c b/trunk/drivers/acpi/acpica/rsirq.c
index e23a9ec248cb..50b8ad211167 100644
--- a/trunk/drivers/acpi/acpica/rsirq.c
+++ b/trunk/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,34 +264,3 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
AML_OFFSET(dma.dma_channel_mask),
ACPI_RS_OFFSET(data.dma.channel_count)}
};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_fixed_dma
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
- {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
- ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
- ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
-
- {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
- sizeof(struct aml_resource_fixed_dma),
- 0},
-
- /*
- * These fields are contiguous in both the source and destination:
- * request_lines
- * Channels
- */
-
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
- AML_OFFSET(fixed_dma.request_lines),
- 2},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
- AML_OFFSET(fixed_dma.width),
- 1},
-
-};
diff --git a/trunk/drivers/acpi/acpica/rslist.c b/trunk/drivers/acpi/acpica/rslist.c
index 9be129f5d6f4..1bfcef736c50 100644
--- a/trunk/drivers/acpi/acpica/rslist.c
+++ b/trunk/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,8 +70,6 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
struct acpi_resource **resource_ptr =
ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
struct acpi_resource *resource;
- union aml_resource *aml_resource;
- struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -86,37 +84,14 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
"Misaligned resource pointer %p", resource));
}
- /* Get the appropriate conversion info table */
-
- aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
- if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- if (aml_resource->common_serial_bus.type >
- AML_RESOURCE_MAX_SERIALBUSTYPE) {
- conversion_table = NULL;
- } else {
- /* This is an I2C, SPI, or UART serial_bus descriptor */
-
- conversion_table =
- acpi_gbl_convert_resource_serial_bus_dispatch
- [aml_resource->common_serial_bus.type];
- }
- } else {
- conversion_table =
- acpi_gbl_get_resource_dispatch[resource_index];
- }
-
- if (!conversion_table) {
- ACPI_ERROR((AE_INFO,
- "Invalid/unsupported resource descriptor: Type 0x%2.2X",
- resource_index));
- return (AE_AML_INVALID_RESOURCE_TYPE);
- }
-
/* Convert the AML byte stream resource to a local resource struct */
status =
- acpi_rs_convert_aml_to_resource(resource, aml_resource,
- conversion_table);
+ acpi_rs_convert_aml_to_resource(resource,
+ ACPI_CAST_PTR(union aml_resource,
+ aml),
+ acpi_gbl_get_resource_dispatch
+ [resource_index]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert AML resource (Type 0x%X)",
@@ -131,7 +106,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
/* Point to the next structure in the output buffer */
- *resource_ptr = ACPI_NEXT_RESOURCE(resource);
+ *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
return_ACPI_STATUS(AE_OK);
}
@@ -160,7 +135,6 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
{
u8 *aml = output_buffer;
u8 *end_aml = output_buffer + aml_size_needed;
- struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -180,34 +154,11 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
- if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- if (resource->data.common_serial_bus.type >
- AML_RESOURCE_MAX_SERIALBUSTYPE) {
- conversion_table = NULL;
- } else {
- /* This is an I2C, SPI, or UART serial_bus descriptor */
-
- conversion_table =
- acpi_gbl_convert_resource_serial_bus_dispatch
- [resource->data.common_serial_bus.type];
- }
- } else {
- conversion_table =
- acpi_gbl_set_resource_dispatch[resource->type];
- }
-
- if (!conversion_table) {
- ACPI_ERROR((AE_INFO,
- "Invalid/unsupported resource descriptor: Type 0x%2.2X",
- resource->type));
- return (AE_AML_INVALID_RESOURCE_TYPE);
- }
-
- status = acpi_rs_convert_resource_to_aml(resource,
- ACPI_CAST_PTR(union
- aml_resource,
- aml),
- conversion_table);
+ status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
+ acpi_gbl_set_resource_dispatch
+ [resource->type]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert resource (type 0x%X) to AML",
@@ -241,7 +192,9 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Point to the next input resource descriptor */
- resource = ACPI_NEXT_RESOURCE(resource);
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
}
/* Completed buffer, but did not find an end_tag resource descriptor */
diff --git a/trunk/drivers/acpi/acpica/rsmemory.c b/trunk/drivers/acpi/acpica/rsmemory.c
index 4fd611ad02b4..7cc6d8625f1e 100644
--- a/trunk/drivers/acpi/acpica/rsmemory.c
+++ b/trunk/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/rsmisc.c b/trunk/drivers/acpi/acpica/rsmisc.c
index 8073b371cc7c..410264b22a29 100644
--- a/trunk/drivers/acpi/acpica/rsmisc.c
+++ b/trunk/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,10 +83,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
- if (!info) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
if (((acpi_size) resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
@@ -105,6 +101,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* table length (# of table entries)
*/
count = INIT_TABLE_LENGTH(info);
+
while (count) {
/*
* Source is the external AML byte stream buffer,
@@ -148,14 +145,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
((ACPI_GET8(source) >> info->value) & 0x03);
break;
- case ACPI_RSC_3BITFLAG:
- /*
- * Mask and shift the flag bits
- */
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x07);
- break;
-
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -174,69 +163,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
(info->value * (item_count - 1));
break;
- case ACPI_RSC_COUNT_GPIO_PIN:
-
- target = ACPI_ADD_PTR(void, aml, info->value);
- item_count = ACPI_GET16(target) - ACPI_GET16(source);
-
- resource->length = resource->length + item_count;
- item_count = item_count / 2;
- ACPI_SET16(destination) = item_count;
- break;
-
- case ACPI_RSC_COUNT_GPIO_VEN:
-
- item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8)item_count;
-
- resource->length = resource->length +
- (info->value * item_count);
- break;
-
- case ACPI_RSC_COUNT_GPIO_RES:
-
- /*
- * Vendor data is optional (length/offset may both be zero)
- * Examine vendor data length field first
- */
- target = ACPI_ADD_PTR(void, aml, (info->value + 2));
- if (ACPI_GET16(target)) {
-
- /* Use vendor offset to get resource source length */
-
- target = ACPI_ADD_PTR(void, aml, info->value);
- item_count =
- ACPI_GET16(target) - ACPI_GET16(source);
- } else {
- /* No vendor data to worry about */
-
- item_count = aml->large_header.resource_length +
- sizeof(struct aml_resource_large_header) -
- ACPI_GET16(source);
- }
-
- resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
- break;
-
- case ACPI_RSC_COUNT_SERIAL_VEN:
-
- item_count = ACPI_GET16(source) - info->value;
-
- resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
- break;
-
- case ACPI_RSC_COUNT_SERIAL_RES:
-
- item_count = (aml_resource_length +
- sizeof(struct aml_resource_large_header))
- - ACPI_GET16(source) - info->value;
-
- resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
- break;
-
case ACPI_RSC_LENGTH:
resource->length = resource->length + info->value;
@@ -257,72 +183,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info->opcode);
break;
- case ACPI_RSC_MOVE_GPIO_PIN:
-
- /* Generate and set the PIN data pointer */
-
- target = (char *)ACPI_ADD_PTR(void, resource,
- (resource->length -
- item_count * 2));
- *(u16 **)destination = ACPI_CAST_PTR(u16, target);
-
- /* Copy the PIN data */
-
- source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
- acpi_rs_move_data(target, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_GPIO_RES:
-
- /* Generate and set the resource_source string pointer */
-
- target = (char *)ACPI_ADD_PTR(void, resource,
- (resource->length -
- item_count));
- *(u8 **)destination = ACPI_CAST_PTR(u8, target);
-
- /* Copy the resource_source string */
-
- source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
- acpi_rs_move_data(target, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_SERIAL_VEN:
-
- /* Generate and set the Vendor Data pointer */
-
- target = (char *)ACPI_ADD_PTR(void, resource,
- (resource->length -
- item_count));
- *(u8 **)destination = ACPI_CAST_PTR(u8, target);
-
- /* Copy the Vendor Data */
-
- source = ACPI_ADD_PTR(void, aml, info->value);
- acpi_rs_move_data(target, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_SERIAL_RES:
-
- /* Generate and set the resource_source string pointer */
-
- target = (char *)ACPI_ADD_PTR(void, resource,
- (resource->length -
- item_count));
- *(u8 **)destination = ACPI_CAST_PTR(u8, target);
-
- /* Copy the resource_source string */
-
- source =
- ACPI_ADD_PTR(void, aml,
- (ACPI_GET16(source) + info->value));
- acpi_rs_move_data(target, source, item_count,
- info->opcode);
- break;
-
case ACPI_RSC_SET8:
ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -359,18 +219,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* Optional resource_source (Index and String). This is the more
* complicated case used by the Interrupt() macro
*/
- target = ACPI_ADD_PTR(char, resource,
- info->aml_offset +
- (item_count * 4));
+ target =
+ ACPI_ADD_PTR(char, resource,
+ info->aml_offset + (item_count * 4));
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
- (acpi_rs_length)
- (((item_count -
- 1) * sizeof(u32)) +
- info->value),
- destination, aml,
- target);
+ (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
break;
case ACPI_RSC_BITMASK:
@@ -472,7 +327,6 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
{
void *source = NULL;
void *destination;
- char *target;
acpi_rsdesc_size aml_length = 0;
u8 count;
u16 temp16 = 0;
@@ -480,10 +334,6 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
- if (!info) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
* table length (# of table entries)
@@ -533,14 +383,6 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
((ACPI_GET8(source) & 0x03) << info->value);
break;
- case ACPI_RSC_3BITFLAG:
- /*
- * Mask and shift the flag bits
- */
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x07) << info->value);
- break;
-
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -558,63 +400,6 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
acpi_rs_set_resource_length(aml_length, aml);
break;
- case ACPI_RSC_COUNT_GPIO_PIN:
-
- item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
-
- aml_length = (u16)(aml_length + item_count * 2);
- target = ACPI_ADD_PTR(void, aml, info->value);
- ACPI_SET16(target) = (u16)aml_length;
- acpi_rs_set_resource_length(aml_length, aml);
- break;
-
- case ACPI_RSC_COUNT_GPIO_VEN:
-
- item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)item_count;
-
- aml_length =
- (u16)(aml_length + (info->value * item_count));
- acpi_rs_set_resource_length(aml_length, aml);
- break;
-
- case ACPI_RSC_COUNT_GPIO_RES:
-
- /* Set resource source string length */
-
- item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
-
- /* Compute offset for the Vendor Data */
-
- aml_length = (u16)(aml_length + item_count);
- target = ACPI_ADD_PTR(void, aml, info->value);
-
- /* Set vendor offset only if there is vendor data */
-
- if (resource->data.gpio.vendor_length) {
- ACPI_SET16(target) = (u16)aml_length;
- }
-
- acpi_rs_set_resource_length(aml_length, aml);
- break;
-
- case ACPI_RSC_COUNT_SERIAL_VEN:
-
- item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = item_count + info->value;
- aml_length = (u16)(aml_length + item_count);
- acpi_rs_set_resource_length(aml_length, aml);
- break;
-
- case ACPI_RSC_COUNT_SERIAL_RES:
-
- item_count = ACPI_GET16(source);
- aml_length = (u16)(aml_length + item_count);
- acpi_rs_set_resource_length(aml_length, aml);
- break;
-
case ACPI_RSC_LENGTH:
acpi_rs_set_resource_length(info->value, aml);
@@ -632,48 +417,6 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info->opcode);
break;
- case ACPI_RSC_MOVE_GPIO_PIN:
-
- destination = (char *)ACPI_ADD_PTR(void, aml,
- ACPI_GET16
- (destination));
- source = *(u16 **)source;
- acpi_rs_move_data(destination, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_GPIO_RES:
-
- /* Used for both resource_source string and vendor_data */
-
- destination = (char *)ACPI_ADD_PTR(void, aml,
- ACPI_GET16
- (destination));
- source = *(u8 **)source;
- acpi_rs_move_data(destination, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_SERIAL_VEN:
-
- destination = (char *)ACPI_ADD_PTR(void, aml,
- (aml_length -
- item_count));
- source = *(u8 **)source;
- acpi_rs_move_data(destination, source, item_count,
- info->opcode);
- break;
-
- case ACPI_RSC_MOVE_SERIAL_RES:
-
- destination = (char *)ACPI_ADD_PTR(void, aml,
- (aml_length -
- item_count));
- source = *(u8 **)source;
- acpi_rs_move_data(destination, source, item_count,
- info->opcode);
- break;
-
case ACPI_RSC_ADDRESS:
/* Set the Resource Type, General Flags, and Type-Specific Flags */
diff --git a/trunk/drivers/acpi/acpica/rsserial.c b/trunk/drivers/acpi/acpica/rsserial.c
deleted file mode 100644
index 9aa5e689b444..000000000000
--- a/trunk/drivers/acpi/acpica/rsserial.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsserial - GPIO/serial_bus resource descriptors
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2012, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include "accommon.h"
-#include "acresrc.h"
-
-#define _COMPONENT ACPI_RESOURCES
-ACPI_MODULE_NAME("rsserial")
-
-/*******************************************************************************
- *
- * acpi_rs_convert_gpio
- *
- ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
- {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
- ACPI_RS_SIZE(struct acpi_resource_gpio),
- ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
-
- {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
- sizeof(struct aml_resource_gpio),
- 0},
-
- /*
- * These fields are contiguous in both the source and destination:
- * revision_id
- * connection_type
- */
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
- AML_OFFSET(gpio.revision_id),
- 2},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
- AML_OFFSET(gpio.flags),
- 0},
-
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
- AML_OFFSET(gpio.int_flags),
- 3},
-
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
- AML_OFFSET(gpio.int_flags),
- 0},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
- AML_OFFSET(gpio.int_flags),
- 0},
-
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
- AML_OFFSET(gpio.int_flags),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
- AML_OFFSET(gpio.pin_config),
- 1},
-
- /*
- * These fields are contiguous in both the source and destination:
- * drive_strength
- * debounce_timeout
- */
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
- AML_OFFSET(gpio.drive_strength),
- 2},
-
- /* Pin Table */
-
- {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
- AML_OFFSET(gpio.pin_table_offset),
- AML_OFFSET(gpio.res_source_offset)},
-
- {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
- AML_OFFSET(gpio.pin_table_offset),
- 0},
-
- /* Resource Source */
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
- AML_OFFSET(gpio.res_source_index),
- 1},
-
- {ACPI_RSC_COUNT_GPIO_RES,
- ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
- AML_OFFSET(gpio.res_source_offset),
- AML_OFFSET(gpio.vendor_offset)},
-
- {ACPI_RSC_MOVE_GPIO_RES,
- ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
- AML_OFFSET(gpio.res_source_offset),
- 0},
-
- /* Vendor Data */
-
- {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
- AML_OFFSET(gpio.vendor_length),
- 1},
-
- {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
- AML_OFFSET(gpio.vendor_offset),
- 0},
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_i2c_serial_bus
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
- {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
- ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
- ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
-
- {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
- sizeof(struct aml_resource_i2c_serialbus),
- 0},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
- AML_OFFSET(common_serial_bus.revision_id),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
- AML_OFFSET(common_serial_bus.type),
- 1},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
- AML_OFFSET(common_serial_bus.flags),
- 0},
-
- {ACPI_RSC_1BITFLAG,
- ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
- AML_OFFSET(common_serial_bus.flags),
- 1},
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
- AML_OFFSET(common_serial_bus.type_revision_id),
- 1},
-
- {ACPI_RSC_MOVE16,
- ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- 1},
-
- /* Vendor data */
-
- {ACPI_RSC_COUNT_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- AML_RESOURCE_I2C_MIN_DATA_LEN},
-
- {ACPI_RSC_MOVE_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
- 0,
- sizeof(struct aml_resource_i2c_serialbus)},
-
- /* Resource Source */
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
- AML_OFFSET(common_serial_bus.res_source_index),
- 1},
-
- {ACPI_RSC_COUNT_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- {ACPI_RSC_MOVE_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- /* I2C bus type specific */
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
- AML_OFFSET(i2c_serial_bus.type_specific_flags),
- 0},
-
- {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
- AML_OFFSET(i2c_serial_bus.connection_speed),
- 1},
-
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
- AML_OFFSET(i2c_serial_bus.slave_address),
- 1},
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_spi_serial_bus
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
- {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
- ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
- ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
-
- {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
- sizeof(struct aml_resource_spi_serialbus),
- 0},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
- AML_OFFSET(common_serial_bus.revision_id),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
- AML_OFFSET(common_serial_bus.type),
- 1},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
- AML_OFFSET(common_serial_bus.flags),
- 0},
-
- {ACPI_RSC_1BITFLAG,
- ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
- AML_OFFSET(common_serial_bus.flags),
- 1},
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
- AML_OFFSET(common_serial_bus.type_revision_id),
- 1},
-
- {ACPI_RSC_MOVE16,
- ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- 1},
-
- /* Vendor data */
-
- {ACPI_RSC_COUNT_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- AML_RESOURCE_SPI_MIN_DATA_LEN},
-
- {ACPI_RSC_MOVE_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
- 0,
- sizeof(struct aml_resource_spi_serialbus)},
-
- /* Resource Source */
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
- AML_OFFSET(common_serial_bus.res_source_index),
- 1},
-
- {ACPI_RSC_COUNT_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- {ACPI_RSC_MOVE_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- /* Spi bus type specific */
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
- AML_OFFSET(spi_serial_bus.type_specific_flags),
- 0},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
- AML_OFFSET(spi_serial_bus.type_specific_flags),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
- AML_OFFSET(spi_serial_bus.data_bit_length),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
- AML_OFFSET(spi_serial_bus.clock_phase),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
- AML_OFFSET(spi_serial_bus.clock_polarity),
- 1},
-
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
- AML_OFFSET(spi_serial_bus.device_selection),
- 1},
-
- {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
- AML_OFFSET(spi_serial_bus.connection_speed),
- 1},
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_uart_serial_bus
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
- {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
- ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
- ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
-
- {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
- sizeof(struct aml_resource_uart_serialbus),
- 0},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
- AML_OFFSET(common_serial_bus.revision_id),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
- AML_OFFSET(common_serial_bus.type),
- 1},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
- AML_OFFSET(common_serial_bus.flags),
- 0},
-
- {ACPI_RSC_1BITFLAG,
- ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
- AML_OFFSET(common_serial_bus.flags),
- 1},
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
- AML_OFFSET(common_serial_bus.type_revision_id),
- 1},
-
- {ACPI_RSC_MOVE16,
- ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- 1},
-
- /* Vendor data */
-
- {ACPI_RSC_COUNT_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- AML_RESOURCE_UART_MIN_DATA_LEN},
-
- {ACPI_RSC_MOVE_SERIAL_VEN,
- ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
- 0,
- sizeof(struct aml_resource_uart_serialbus)},
-
- /* Resource Source */
-
- {ACPI_RSC_MOVE8,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
- AML_OFFSET(common_serial_bus.res_source_index),
- 1},
-
- {ACPI_RSC_COUNT_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- {ACPI_RSC_MOVE_SERIAL_RES,
- ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
- AML_OFFSET(common_serial_bus.type_data_length),
- sizeof(struct aml_resource_common_serialbus)},
-
- /* Uart bus type specific */
-
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
- AML_OFFSET(uart_serial_bus.type_specific_flags),
- 0},
-
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
- AML_OFFSET(uart_serial_bus.type_specific_flags),
- 2},
-
- {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
- AML_OFFSET(uart_serial_bus.type_specific_flags),
- 4},
-
- {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
- AML_OFFSET(uart_serial_bus.type_specific_flags),
- 7},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
- AML_OFFSET(uart_serial_bus.parity),
- 1},
-
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
- AML_OFFSET(uart_serial_bus.lines_enabled),
- 1},
-
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
- AML_OFFSET(uart_serial_bus.rx_fifo_size),
- 1},
-
- {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
- AML_OFFSET(uart_serial_bus.tx_fifo_size),
- 1},
-
- {ACPI_RSC_MOVE32,
- ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
- AML_OFFSET(uart_serial_bus.default_baud_rate),
- 1},
-};
diff --git a/trunk/drivers/acpi/acpica/rsutils.c b/trunk/drivers/acpi/acpica/rsutils.c
index 433a375deb93..231811e56939 100644
--- a/trunk/drivers/acpi/acpica/rsutils.c
+++ b/trunk/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,9 +144,6 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* since there are no alignment or endian issues
*/
case ACPI_RSC_MOVE8:
- case ACPI_RSC_MOVE_GPIO_RES:
- case ACPI_RSC_MOVE_SERIAL_VEN:
- case ACPI_RSC_MOVE_SERIAL_RES:
ACPI_MEMCPY(destination, source, item_count);
return;
@@ -156,7 +153,6 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
- case ACPI_RSC_MOVE_GPIO_PIN:
ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
&ACPI_CAST_PTR(u16, source)[i]);
break;
@@ -592,56 +588,6 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
}
#endif /* ACPI_FUTURE_USAGE */
-/*******************************************************************************
- *
- * FUNCTION: acpi_rs_get_aei_method_data
- *
- * PARAMETERS: Node - Device node
- * ret_buffer - Pointer to a buffer structure for the
- * results
- *
- * RETURN: Status
- *
- * DESCRIPTION: This function is called to get the _AEI value of an object
- * contained in an object specified by the handle passed in
- *
- * If the function fails an appropriate status will be returned
- * and the contents of the callers buffer is undefined.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
- struct acpi_buffer *ret_buffer)
-{
- union acpi_operand_object *obj_desc;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
-
- /* Parameters guaranteed valid by caller */
-
- /* Execute the method, no parameters */
-
- status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
- ACPI_BTYPE_BUFFER, &obj_desc);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Make the call to create a resource linked list from the
- * byte stream buffer that comes back from the _CRS method
- * execution.
- */
- status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
-
- /* On exit, we must delete the object returned by evaluate_object */
-
- acpi_ut_remove_reference(obj_desc);
- return_ACPI_STATUS(status);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_method_data
diff --git a/trunk/drivers/acpi/acpica/rsxface.c b/trunk/drivers/acpi/acpica/rsxface.c
index f58c098c7aeb..fe86b37b16ce 100644
--- a/trunk/drivers/acpi/acpica/rsxface.c
+++ b/trunk/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,46 +307,6 @@ acpi_set_current_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
-/*******************************************************************************
- *
- * FUNCTION: acpi_get_event_resources
- *
- * PARAMETERS: device_handle - Handle to the device object for the
- * device we are getting resources
- * in_buffer - Pointer to a buffer containing the
- * resources to be set for the device
- *
- * RETURN: Status
- *
- * DESCRIPTION: This function is called to get the event resources for a
- * specific device. The caller must first acquire a handle for
- * the desired device. The resource data is passed to the routine
- * the buffer pointed to by the in_buffer variable. Uses the
- * _AEI method.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_event_resources(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer)
-{
- acpi_status status;
- struct acpi_namespace_node *node;
-
- ACPI_FUNCTION_TRACE(acpi_get_event_resources);
-
- /* Validate parameters then dispatch to internal routine */
-
- status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_rs_get_aei_method_data(node, ret_buffer);
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
-
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
@@ -526,9 +486,8 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want.
- * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * Name - Method name of the resources we want
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
* user_function - Called for each resource
* Context - Passed to user_function
*
@@ -555,12 +514,11 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
+ /* Get the _CRS or _PRS resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
diff --git a/trunk/drivers/acpi/acpica/tbfadt.c b/trunk/drivers/acpi/acpica/tbfadt.c
index c5d870406f41..6f5588e62c0a 100644
--- a/trunk/drivers/acpi/acpica/tbfadt.c
+++ b/trunk/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,15 +63,14 @@ static void acpi_tb_setup_fadt_registers(void);
typedef struct acpi_fadt_info {
char *name;
- u16 address64;
- u16 address32;
- u16 length;
+ u8 address64;
+ u8 address32;
+ u8 length;
u8 default_length;
u8 type;
} acpi_fadt_info;
-#define ACPI_FADT_OPTIONAL 0
#define ACPI_FADT_REQUIRED 1
#define ACPI_FADT_SEPARATE_LENGTH 2
@@ -88,7 +87,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_event_block),
ACPI_FADT_OFFSET(pm1_event_length),
ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
- ACPI_FADT_OPTIONAL},
+ 0},
{"Pm1aControlBlock",
ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -102,7 +101,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_control_block),
ACPI_FADT_OFFSET(pm1_control_length),
ACPI_PM1_REGISTER_WIDTH,
- ACPI_FADT_OPTIONAL},
+ 0},
{"Pm2ControlBlock",
ACPI_FADT_OFFSET(xpm2_control_block),
@@ -140,7 +139,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
typedef struct acpi_fadt_pm_info {
struct acpi_generic_address *target;
- u16 source;
+ u8 source;
u8 register_num;
} acpi_fadt_pm_info;
@@ -254,13 +253,8 @@ void acpi_tb_parse_fadt(u32 table_index)
acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (!acpi_gbl_reduced_hardware) {
- acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
- Xfacs, ACPI_SIG_FACS,
- ACPI_TABLE_INDEX_FACS);
- }
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
+ ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
}
/*******************************************************************************
@@ -283,12 +277,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 5.0 version). If so, truncate the table, and issue
+ * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
* a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 5.0 version, "
+ "FADT (revision %u) is longer than ACPI 2.0 version, "
"truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
@@ -303,13 +297,6 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
ACPI_MEMCPY(&acpi_gbl_FADT, table,
ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
- /* Take a copy of the Hardware Reduced flag */
-
- acpi_gbl_reduced_hardware = FALSE;
- if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
- acpi_gbl_reduced_hardware = TRUE;
- }
-
/* Convert the local copy of the FADT to the common internal format */
acpi_tb_convert_fadt();
@@ -515,12 +502,6 @@ static void acpi_tb_validate_fadt(void)
acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
}
- /* If Hardware Reduced flag is set, we are all done */
-
- if (acpi_gbl_reduced_hardware) {
- return;
- }
-
/* Examine all of the 64-bit extended address fields (X fields) */
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
diff --git a/trunk/drivers/acpi/acpica/tbfind.c b/trunk/drivers/acpi/acpica/tbfind.c
index 4903e36ea75a..a55cb2bb5abb 100644
--- a/trunk/drivers/acpi/acpica/tbfind.c
+++ b/trunk/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbinstal.c b/trunk/drivers/acpi/acpica/tbinstal.c
index 1aecf7baa4e0..62365f6075dd 100644
--- a/trunk/drivers/acpi/acpica/tbinstal.c
+++ b/trunk/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbutils.c b/trunk/drivers/acpi/acpica/tbutils.c
index 09ca39e14337..0f2d395feaba 100644
--- a/trunk/drivers/acpi/acpica/tbutils.c
+++ b/trunk/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,13 +135,6 @@ acpi_status acpi_tb_initialize_facs(void)
{
acpi_status status;
- /* If Hardware Reduced flag is set, there is no FACS */
-
- if (acpi_gbl_reduced_hardware) {
- acpi_gbl_FACS = NULL;
- return (AE_OK);
- }
-
status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
diff --git a/trunk/drivers/acpi/acpica/tbxface.c b/trunk/drivers/acpi/acpica/tbxface.c
index abcc6412c244..e7d13f5d3f2d 100644
--- a/trunk/drivers/acpi/acpica/tbxface.c
+++ b/trunk/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/tbxfroot.c b/trunk/drivers/acpi/acpica/tbxfroot.c
index 4258f647ca3d..7eb6c6cc1edf 100644
--- a/trunk/drivers/acpi/acpica/tbxfroot.c
+++ b/trunk/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utaddress.c b/trunk/drivers/acpi/acpica/utaddress.c
deleted file mode 100644
index 67932aebe6dd..000000000000
--- a/trunk/drivers/acpi/acpica/utaddress.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utaddress - op_region address range check
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2012, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include "accommon.h"
-#include "acnamesp.h"
-
-#define _COMPONENT ACPI_UTILITIES
-ACPI_MODULE_NAME("utaddress")
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_add_address_range
- *
- * PARAMETERS: space_id - Address space ID
- * Address - op_region start address
- * Length - op_region length
- * region_node - op_region namespace node
- *
- * RETURN: Status
- *
- * DESCRIPTION: Add the Operation Region address range to the global list.
- * The only supported Space IDs are Memory and I/O. Called when
- * the op_region address/length operands are fully evaluated.
- *
- * MUTEX: Locks the namespace
- *
- * NOTE: Because this interface is only called when an op_region argument
- * list is evaluated, there cannot be any duplicate region_nodes.
- * Duplicate Address/Length values are allowed, however, so that multiple
- * address conflicts can be detected.
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_add_address_range(acpi_adr_space_type space_id,
- acpi_physical_address address,
- u32 length, struct acpi_namespace_node *region_node)
-{
- struct acpi_address_range *range_info;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ut_add_address_range);
-
- if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
- (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Allocate/init a new info block, add it to the appropriate list */
-
- range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
- if (!range_info) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- range_info->start_address = address;
- range_info->end_address = (address + length - 1);
- range_info->region_node = region_node;
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- ACPI_FREE(range_info);
- return_ACPI_STATUS(status);
- }
-
- range_info->next = acpi_gbl_address_range_list[space_id];
- acpi_gbl_address_range_list[space_id] = range_info;
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
- acpi_ut_get_node_name(range_info->region_node),
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, range_info->end_address)));
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_remove_address_range
- *
- * PARAMETERS: space_id - Address space ID
- * region_node - op_region namespace node
- *
- * RETURN: None
- *
- * DESCRIPTION: Remove the Operation Region from the global list. The only
- * supported Space IDs are Memory and I/O. Called when an
- * op_region is deleted.
- *
- * MUTEX: Assumes the namespace is locked
- *
- ******************************************************************************/
-
-void
-acpi_ut_remove_address_range(acpi_adr_space_type space_id,
- struct acpi_namespace_node *region_node)
-{
- struct acpi_address_range *range_info;
- struct acpi_address_range *prev;
-
- ACPI_FUNCTION_TRACE(ut_remove_address_range);
-
- if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
- (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_VOID;
- }
-
- /* Get the appropriate list head and check the list */
-
- range_info = prev = acpi_gbl_address_range_list[space_id];
- while (range_info) {
- if (range_info->region_node == region_node) {
- if (range_info == prev) { /* Found at list head */
- acpi_gbl_address_range_list[space_id] =
- range_info->next;
- } else {
- prev->next = range_info->next;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
- acpi_ut_get_node_name(range_info->
- region_node),
- ACPI_CAST_PTR(void,
- range_info->
- start_address),
- ACPI_CAST_PTR(void,
- range_info->
- end_address)));
-
- ACPI_FREE(range_info);
- return_VOID;
- }
-
- prev = range_info;
- range_info = range_info->next;
- }
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_check_address_range
- *
- * PARAMETERS: space_id - Address space ID
- * Address - Start address
- * Length - Length of address range
- * Warn - TRUE if warning on overlap desired
- *
- * RETURN: Count of the number of conflicts detected. Zero is always
- * returned for Space IDs other than Memory or I/O.
- *
- * DESCRIPTION: Check if the input address range overlaps any of the
- * ASL operation region address ranges. The only supported
- * Space IDs are Memory and I/O.
- *
- * MUTEX: Assumes the namespace is locked.
- *
- ******************************************************************************/
-
-u32
-acpi_ut_check_address_range(acpi_adr_space_type space_id,
- acpi_physical_address address, u32 length, u8 warn)
-{
- struct acpi_address_range *range_info;
- acpi_physical_address end_address;
- char *pathname;
- u32 overlap_count = 0;
-
- ACPI_FUNCTION_TRACE(ut_check_address_range);
-
- if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
- (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_UINT32(0);
- }
-
- range_info = acpi_gbl_address_range_list[space_id];
- end_address = address + length - 1;
-
- /* Check entire list for all possible conflicts */
-
- while (range_info) {
- /*
- * Check if the requested Address/Length overlaps this address_range.
- * Four cases to consider:
- *
- * 1) Input address/length is contained completely in the address range
- * 2) Input address/length overlaps range at the range start
- * 3) Input address/length overlaps range at the range end
- * 4) Input address/length completely encompasses the range
- */
- if ((address <= range_info->end_address) &&
- (end_address >= range_info->start_address)) {
-
- /* Found an address range overlap */
-
- overlap_count++;
- if (warn) { /* Optional warning message */
- pathname =
- acpi_ns_get_external_pathname(range_info->
- region_node);
-
- ACPI_WARNING((AE_INFO,
- "0x%p-0x%p %s conflicts with Region %s %d",
- ACPI_CAST_PTR(void, address),
- ACPI_CAST_PTR(void, end_address),
- acpi_ut_get_region_name(space_id),
- pathname, overlap_count));
- ACPI_FREE(pathname);
- }
- }
-
- range_info = range_info->next;
- }
-
- return_UINT32(overlap_count);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_delete_address_lists
- *
- * PARAMETERS: None
- *
- * RETURN: None
- *
- * DESCRIPTION: Delete all global address range lists (called during
- * subsystem shutdown).
- *
- ******************************************************************************/
-
-void acpi_ut_delete_address_lists(void)
-{
- struct acpi_address_range *next;
- struct acpi_address_range *range_info;
- int i;
-
- /* Delete all elements in all address range lists */
-
- for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
- next = acpi_gbl_address_range_list[i];
-
- while (next) {
- range_info = next;
- next = range_info->next;
- ACPI_FREE(range_info);
- }
-
- acpi_gbl_address_range_list[i] = NULL;
- }
-}
diff --git a/trunk/drivers/acpi/acpica/utalloc.c b/trunk/drivers/acpi/acpica/utalloc.c
index 9982d2ea66fb..0a697351cf69 100644
--- a/trunk/drivers/acpi/acpica/utalloc.c
+++ b/trunk/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utcopy.c b/trunk/drivers/acpi/acpica/utcopy.c
index 3317c0a406ee..aded299a2fa8 100644
--- a/trunk/drivers/acpi/acpica/utcopy.c
+++ b/trunk/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utdebug.c b/trunk/drivers/acpi/acpica/utdebug.c
index a0998a886318..a1f8d7509e66 100644
--- a/trunk/drivers/acpi/acpica/utdebug.c
+++ b/trunk/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utdecode.c b/trunk/drivers/acpi/acpica/utdecode.c
index d42ede5260c7..8b087e2d64f4 100644
--- a/trunk/drivers/acpi/acpica/utdecode.c
+++ b/trunk/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -171,9 +171,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI",
- "GeneralPurposeIo",
- "GenericSerialBus"
+ "IPMI"
};
char *acpi_ut_get_region_name(u8 space_id)
diff --git a/trunk/drivers/acpi/acpica/utdelete.c b/trunk/drivers/acpi/acpica/utdelete.c
index 2a6c3e183697..31f5a7832ef1 100644
--- a/trunk/drivers/acpi/acpica/utdelete.c
+++ b/trunk/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -215,14 +215,11 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
- /*
- * Update address_range list. However, only permanent regions
- * are installed in this list. (Not created within a method)
- */
- if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
- acpi_ut_remove_address_range(object->region.space_id,
- object->region.node);
- }
+ /* Invalidate the region address/length via the host OS */
+
+ acpi_os_invalidate_address(object->region.space_id,
+ object->region.address,
+ (acpi_size) object->region.length);
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
diff --git a/trunk/drivers/acpi/acpica/uteval.c b/trunk/drivers/acpi/acpica/uteval.c
index 479f32b33415..18f73c9d10bc 100644
--- a/trunk/drivers/acpi/acpica/uteval.c
+++ b/trunk/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utglobal.c b/trunk/drivers/acpi/acpica/utglobal.c
index 4153584cf526..ffba0a39c3e8 100644
--- a/trunk/drivers/acpi/acpica/utglobal.c
+++ b/trunk/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,12 +264,6 @@ acpi_status acpi_ut_init_globals(void)
return_ACPI_STATUS(status);
}
- /* Address Range lists */
-
- for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
- acpi_gbl_address_range_list[i] = NULL;
- }
-
/* Mutex locked flags */
for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/trunk/drivers/acpi/acpica/utids.c b/trunk/drivers/acpi/acpica/utids.c
index c92eb1d93785..b679ea693545 100644
--- a/trunk/drivers/acpi/acpica/utids.c
+++ b/trunk/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utinit.c b/trunk/drivers/acpi/acpica/utinit.c
index 8359c0c5dc98..191b6828cce9 100644
--- a/trunk/drivers/acpi/acpica/utinit.c
+++ b/trunk/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,7 +92,6 @@ static void acpi_ut_terminate(void)
gpe_xrupt_info = next_gpe_xrupt_info;
}
- acpi_ut_delete_address_lists();
return_VOID;
}
diff --git a/trunk/drivers/acpi/acpica/utlock.c b/trunk/drivers/acpi/acpica/utlock.c
index 155fd786d0f2..f6bb75c6faf5 100644
--- a/trunk/drivers/acpi/acpica/utlock.c
+++ b/trunk/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmath.c b/trunk/drivers/acpi/acpica/utmath.c
index 2491a552b0e6..ce481da9bb45 100644
--- a/trunk/drivers/acpi/acpica/utmath.c
+++ b/trunk/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmisc.c b/trunk/drivers/acpi/acpica/utmisc.c
index 86f19db74e05..c33a852d4f42 100644
--- a/trunk/drivers/acpi/acpica/utmisc.c
+++ b/trunk/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utmutex.c b/trunk/drivers/acpi/acpica/utmutex.c
index 43174df33121..7d797e2baecd 100644
--- a/trunk/drivers/acpi/acpica/utmutex.c
+++ b/trunk/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,10 +293,14 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
+ acpi_thread_id this_thread_id;
+
ACPI_FUNCTION_NAME(ut_release_mutex);
+ this_thread_id = acpi_os_get_thread_id();
+
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
- (u32)acpi_os_get_thread_id(),
+ (u32)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
@@ -325,8 +329,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
- if (acpi_gbl_mutex_info[i].thread_id ==
- acpi_os_get_thread_id()) {
+ if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
continue;
}
diff --git a/trunk/drivers/acpi/acpica/utobject.c b/trunk/drivers/acpi/acpica/utobject.c
index b112744fc9ae..188340a017b4 100644
--- a/trunk/drivers/acpi/acpica/utobject.c
+++ b/trunk/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utosi.c b/trunk/drivers/acpi/acpica/utosi.c
index 2360cf70c18c..1fb10cb8f11d 100644
--- a/trunk/drivers/acpi/acpica/utosi.c
+++ b/trunk/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utresrc.c b/trunk/drivers/acpi/acpica/utresrc.c
index 9d441ea70305..6ffd3a8bdaa5 100644
--- a/trunk/drivers/acpi/acpica/utresrc.c
+++ b/trunk/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acresrc.h"
+#include "amlresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
@@ -154,138 +154,6 @@ const char *acpi_gbl_typ_decode[] = {
"TypeF"
};
-const char *acpi_gbl_ppc_decode[] = {
- "PullDefault",
- "PullUp",
- "PullDown",
- "PullNone"
-};
-
-const char *acpi_gbl_ior_decode[] = {
- "IoRestrictionNone",
- "IoRestrictionInputOnly",
- "IoRestrictionOutputOnly",
- "IoRestrictionNoneAndPreserve"
-};
-
-const char *acpi_gbl_dts_decode[] = {
- "Width8bit",
- "Width16bit",
- "Width32bit",
- "Width64bit",
- "Width128bit",
- "Width256bit",
-};
-
-/* GPIO connection type */
-
-const char *acpi_gbl_ct_decode[] = {
- "Interrupt",
- "I/O"
-};
-
-/* Serial bus type */
-
-const char *acpi_gbl_sbt_decode[] = {
- "/* UNKNOWN serial bus type */",
- "I2C",
- "SPI",
- "UART"
-};
-
-/* I2C serial bus access mode */
-
-const char *acpi_gbl_am_decode[] = {
- "AddressingMode7Bit",
- "AddressingMode10Bit"
-};
-
-/* I2C serial bus slave mode */
-
-const char *acpi_gbl_sm_decode[] = {
- "ControllerInitiated",
- "DeviceInitiated"
-};
-
-/* SPI serial bus wire mode */
-
-const char *acpi_gbl_wm_decode[] = {
- "FourWireMode",
- "ThreeWireMode"
-};
-
-/* SPI serial clock phase */
-
-const char *acpi_gbl_cph_decode[] = {
- "ClockPhaseFirst",
- "ClockPhaseSecond"
-};
-
-/* SPI serial bus clock polarity */
-
-const char *acpi_gbl_cpo_decode[] = {
- "ClockPolarityLow",
- "ClockPolarityHigh"
-};
-
-/* SPI serial bus device polarity */
-
-const char *acpi_gbl_dp_decode[] = {
- "PolarityLow",
- "PolarityHigh"
-};
-
-/* UART serial bus endian */
-
-const char *acpi_gbl_ed_decode[] = {
- "LittleEndian",
- "BigEndian"
-};
-
-/* UART serial bus bits per byte */
-
-const char *acpi_gbl_bpb_decode[] = {
- "DataBitsFive",
- "DataBitsSix",
- "DataBitsSeven",
- "DataBitsEight",
- "DataBitsNine",
- "/* UNKNOWN Bits per byte */",
- "/* UNKNOWN Bits per byte */",
- "/* UNKNOWN Bits per byte */"
-};
-
-/* UART serial bus stop bits */
-
-const char *acpi_gbl_sb_decode[] = {
- "StopBitsNone",
- "StopBitsOne",
- "StopBitsOnePlusHalf",
- "StopBitsTwo"
-};
-
-/* UART serial bus flow control */
-
-const char *acpi_gbl_fc_decode[] = {
- "FlowControlNone",
- "FlowControlHardware",
- "FlowControlXON",
- "/* UNKNOWN flow control keyword */"
-};
-
-/* UART serial bus parity type */
-
-const char *acpi_gbl_pt_decode[] = {
- "ParityTypeNone",
- "ParityTypeEven",
- "ParityTypeOdd",
- "ParityTypeMark",
- "ParityTypeSpace",
- "/* UNKNOWN parity keyword */",
- "/* UNKNOWN parity keyword */",
- "/* UNKNOWN parity keyword */"
-};
-
#endif
/*
@@ -305,7 +173,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
- ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
+ 0,
0,
0,
0,
@@ -325,17 +193,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
- ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
- ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
- 0,
- ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
-};
-
-const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
- 0,
- ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
- ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
- ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
};
/*
@@ -351,49 +209,35 @@ static const u8 acpi_gbl_resource_types[] = {
0,
0,
0,
- ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
- ACPI_FIXED_LENGTH, /* 05 DMA */
- ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
- ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
- ACPI_FIXED_LENGTH, /* 08 IO */
- ACPI_FIXED_LENGTH, /* 09 fixed_iO */
- ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
+ ACPI_SMALL_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_SMALL_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ 0,
0,
0,
0,
- ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
- ACPI_FIXED_LENGTH, /* 0_f end_tag */
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
/* Large descriptors */
0,
- ACPI_FIXED_LENGTH, /* 01 Memory24 */
- ACPI_FIXED_LENGTH, /* 02 generic_register */
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
0,
- ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
- ACPI_FIXED_LENGTH, /* 05 Memory32 */
- ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
- ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
- ACPI_VARIABLE_LENGTH, /* 08 Word* address */
- ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
- ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
- ACPI_FIXED_LENGTH, /* 0_b Extended* address */
- ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
- 0,
- ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_VARIABLE_LENGTH,
+ ACPI_FIXED_LENGTH
};
-/*
- * For the i_aSL compiler/disassembler, we don't want any error messages
- * because the disassembler uses the resource validation code to determine
- * if Buffer objects are actually Resource Templates.
- */
-#ifdef ACPI_ASL_COMPILER
-#define ACPI_RESOURCE_ERROR(plist)
-#else
-#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
@@ -421,7 +265,6 @@ acpi_ut_walk_aml_resources(u8 * aml,
u8 resource_index;
u32 length;
u32 offset = 0;
- u8 end_tag[2] = { 0x79, 0x00 };
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
@@ -443,10 +286,6 @@ acpi_ut_walk_aml_resources(u8 * aml,
status = acpi_ut_validate_resource(aml, &resource_index);
if (ACPI_FAILURE(status)) {
- /*
- * Exit on failure. Cannot continue because the descriptor length
- * may be bogus also.
- */
return_ACPI_STATUS(status);
}
@@ -461,7 +300,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
}
@@ -494,19 +333,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Did not find an end_tag descriptor */
- if (user_function) {
-
- /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
-
- (void)acpi_ut_validate_resource(end_tag, &resource_index);
- status =
- user_function(end_tag, 2, offset, resource_index, context);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ return (AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
@@ -527,7 +354,6 @@ acpi_ut_walk_aml_resources(u8 * aml,
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
{
- union aml_resource *aml_resource;
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
@@ -549,7 +375,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
- goto invalid_resource;
+ return (AE_AML_INVALID_RESOURCE_TYPE);
}
/*
@@ -566,17 +392,15 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
- /*
- * Check validity of the resource type, via acpi_gbl_resource_types. Zero
- * indicates an invalid resource.
- */
+ /* Check validity of the resource type, zero indicates name is invalid */
+
if (!acpi_gbl_resource_types[resource_index]) {
- goto invalid_resource;
+ return (AE_AML_INVALID_RESOURCE_TYPE);
}
/*
- * Validate the resource_length field. This ensures that the length
- * is at least reasonable, and guarantees that it is non-zero.
+ * 2) Validate the resource_length field. This ensures that the length
+ * is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -589,7 +413,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
- goto bad_resource_length;
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
@@ -598,7 +422,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
- goto bad_resource_length;
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
@@ -608,7 +432,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
- goto bad_resource_length;
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
@@ -616,23 +440,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Shouldn't happen (because of validation earlier), but be sure */
- goto invalid_resource;
- }
-
- aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
- if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Validate the bus_type field */
-
- if ((aml_resource->common_serial_bus.type == 0) ||
- (aml_resource->common_serial_bus.type >
- AML_RESOURCE_MAX_SERIALBUSTYPE)) {
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- aml_resource->common_serial_bus.
- type));
- return (AE_AML_INVALID_RESOURCE_TYPE);
- }
+ return (AE_AML_INVALID_RESOURCE_TYPE);
}
/* Optionally return the resource table index */
@@ -642,22 +450,6 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
}
return (AE_OK);
-
- invalid_resource:
-
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported resource descriptor: Type 0x%2.2X",
- resource_type));
- return (AE_AML_INVALID_RESOURCE_TYPE);
-
- bad_resource_length:
-
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid resource descriptor length: Type "
- "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
- resource_type, resource_length,
- minimum_resource_length));
- return (AE_AML_BAD_RESOURCE_LENGTH);
}
/*******************************************************************************
diff --git a/trunk/drivers/acpi/acpica/utstate.c b/trunk/drivers/acpi/acpica/utstate.c
index 4267477c2797..30c21e1a9360 100644
--- a/trunk/drivers/acpi/acpica/utstate.c
+++ b/trunk/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utxface.c b/trunk/drivers/acpi/acpica/utxface.c
index 644e8c8ebc4b..420ebfe08c72 100644
--- a/trunk/drivers/acpi/acpica/utxface.c
+++ b/trunk/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,6 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
@@ -641,41 +640,4 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
}
ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
-
-/*****************************************************************************
- *
- * FUNCTION: acpi_check_address_range
- *
- * PARAMETERS: space_id - Address space ID
- * Address - Start address
- * Length - Length
- * Warn - TRUE if warning on overlap desired
- *
- * RETURN: Count of the number of conflicts detected.
- *
- * DESCRIPTION: Check if the input address range overlaps any of the
- * ASL operation region address ranges.
- *
- ****************************************************************************/
-u32
-acpi_check_address_range(acpi_adr_space_type space_id,
- acpi_physical_address address,
- acpi_size length, u8 warn)
-{
- u32 overlaps;
- acpi_status status;
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return (0);
- }
-
- overlaps = acpi_ut_check_address_range(space_id, address,
- (u32)length, warn);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return (overlaps);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
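The function removed above, acpi_check_address_range(), is the exported wrapper that counts overlaps between a physical range and AML operation regions. A minimal sketch of how a caller could have used it before claiming an MMIO window; the helper name and the range are made up for illustration:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ioport.h>

static int claim_mmio(struct device *dev, u64 start, u64 len)
{
	/* warn=1 asks ACPICA to log each overlapping operation region */
	u32 conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
						 start, len, 1);

	if (conflicts)
		dev_warn(dev, "%u AML region overlap(s) at %#llx\n",
			 conflicts, (unsigned long long)start);
	if (!request_mem_region(start, len, dev_name(dev)))
		return -EBUSY;
	return 0;
}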
diff --git a/trunk/drivers/acpi/acpica/utxferror.c b/trunk/drivers/acpi/acpica/utxferror.c
index 52b568af1819..8d0245ec4315 100644
--- a/trunk/drivers/acpi/acpica/utxferror.c
+++ b/trunk/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/trunk/drivers/acpi/acpica/utxfmutex.c b/trunk/drivers/acpi/acpica/utxfmutex.c
deleted file mode 100644
index 1427d191d15a..000000000000
--- a/trunk/drivers/acpi/acpica/utxfmutex.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utxfmutex - external AML mutex access functions
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2012, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include
-#include "accommon.h"
-#include "acnamesp.h"
-
-#define _COMPONENT ACPI_UTILITIES
-ACPI_MODULE_NAME("utxfmutex")
-
-/* Local prototypes */
-static acpi_status
-acpi_ut_get_mutex_object(acpi_handle handle,
- acpi_string pathname,
- union acpi_operand_object **ret_obj);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_get_mutex_object
- *
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
- * ret_obj - Where the mutex object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
- * Handle:Pathname. Either Handle or Pathname can be NULL, but
- * not both.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_get_mutex_object(acpi_handle handle,
- acpi_string pathname,
- union acpi_operand_object **ret_obj)
-{
- struct acpi_namespace_node *mutex_node;
- union acpi_operand_object *mutex_obj;
- acpi_status status;
-
- /* Parameter validation */
-
- if (!ret_obj || (!handle && !pathname)) {
- return (AE_BAD_PARAMETER);
- }
-
- /* Get a the namespace node for the mutex */
-
- mutex_node = handle;
- if (pathname != NULL) {
- status = acpi_get_handle(handle, pathname,
- ACPI_CAST_PTR(acpi_handle,
- &mutex_node));
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
-
- /* Ensure that we actually have a Mutex object */
-
- if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
- return (AE_TYPE);
- }
-
- /* Get the low-level mutex object */
-
- mutex_obj = acpi_ns_get_attached_object(mutex_node);
- if (!mutex_obj) {
- return (AE_NULL_OBJECT);
- }
-
- *ret_obj = mutex_obj;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_acquire_mutex
- *
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
- * Timeout - Max time to wait for the lock (millisec)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
- * AML mutex objects, and allows for transaction locking between
- * drivers and AML code. The mutex node is pointed to by
- * Handle:Pathname. Either Handle or Pathname can be NULL, but
- * not both.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
-{
- acpi_status status;
- union acpi_operand_object *mutex_obj;
-
- /* Get the low-level mutex associated with Handle:Pathname */
-
- status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Acquire the OS mutex */
-
- status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
- return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_release_mutex
- *
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Release an AML mutex. This is a device driver interface to
- * AML mutex objects, and allows for transaction locking between
- * drivers and AML code. The mutex node is pointed to by
- * Handle:Pathname. Either Handle or Pathname can be NULL, but
- * not both.
- *
- ******************************************************************************/
-
-acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
-{
- acpi_status status;
- union acpi_operand_object *mutex_obj;
-
- /* Get the low-level mutex associated with Handle:Pathname */
-
- status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Release the OS mutex */
-
- acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
- return (AE_OK);
-}
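The deleted utxfmutex.c exported acpi_acquire_mutex() and acpi_release_mutex() so a driver could serialize with AML that takes the same Mutex object. A hedged sketch of a caller; the pathname \_SB.PCI0.MTX0, the 500 ms timeout, and the function name are placeholders:

#include <linux/acpi.h>

static int touch_shared_device(void)
{
	acpi_status status;

	/* NULL handle: resolve the mutex purely by its pathname */
	status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MTX0", 500);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	/* ... access hardware that AML also touches ... */

	acpi_release_mutex(NULL, "\\_SB.PCI0.MTX0");
	return 0;
}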
diff --git a/trunk/drivers/acpi/apei/apei-base.c b/trunk/drivers/acpi/apei/apei-base.c
index e45350cb6ac8..61540360d5ce 100644
--- a/trunk/drivers/acpi/apei/apei-base.c
+++ b/trunk/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
#include
#include
#include
-#include
#include
#include
#include
#include
#include
#include
+#include
#include "apei-internal.h"
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
int rc;
- rc = apei_read(val, &entry->register_region);
+ rc = acpi_atomic_read(val, &entry->register_region);
if (rc)
return rc;
*val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
val <<= entry->register_region.bit_offset;
if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
u64 valr = 0;
- rc = apei_read(&valr, &entry->register_region);
+ rc = acpi_atomic_read(&valr, &entry->register_region);
if (rc)
return rc;
valr &= ~(entry->mask << entry->register_region.bit_offset);
val |= valr;
}
- rc = apei_write(val, &entry->register_region);
+ rc = acpi_atomic_write(val, &entry->register_region);
return rc;
}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_os_map_generic_address(&entry->register_region);
+ return acpi_pre_map_gar(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_os_unmap_generic_address(&entry->register_region);
+ acpi_post_unmap_gar(&entry->register_region);
return 0;
}
@@ -421,17 +421,6 @@ static int apei_resources_merge(struct apei_resources *resources1,
return 0;
}
-int apei_resources_add(struct apei_resources *resources,
- unsigned long start, unsigned long size,
- bool iomem)
-{
- if (iomem)
- return apei_res_add(&resources->iomem, start, size);
- else
- return apei_res_add(&resources->ioport, start, size);
-}
-EXPORT_SYMBOL_GPL(apei_resources_add);
-
/*
* EINJ has two groups of GARs (EINJ table entry and trigger table
* entry), so common resources are subtracted from the trigger table
@@ -449,19 +438,8 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
-static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
-{
- struct apei_resources *resources = data;
- return apei_res_add(&resources->iomem, start, size);
-}
-
-static int apei_get_nvs_resources(struct apei_resources *resources)
-{
- return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
-}
-
/*
- * IO memory/port resource management mechanism is used to check
+ * IO memory/port rersource management mechanism is used to check
* whether memory/port area used by GARs conflicts with normal memory
* or IO memory/port of devices.
*/
@@ -470,35 +448,21 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
- struct apei_resources nvs_resources;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
if (rc)
return rc;
- /*
- * Some firmware uses ACPI NVS region, that has been marked as
- * busy, so exclude it from APEI resources to avoid false
- * conflict.
- */
- apei_resources_init(&nvs_resources);
- rc = apei_get_nvs_resources(&nvs_resources);
- if (rc)
- goto res_fini;
- rc = apei_resources_sub(resources, &nvs_resources);
- if (rc)
- goto res_fini;
-
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request [mem %#010llx-%#010llx] for %s registers\n",
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
(unsigned long long)res->start,
- (unsigned long long)res->end - 1, desc);
+ (unsigned long long)res->end);
res_bak = res;
goto err_unmap_iomem;
}
@@ -508,9 +472,9 @@ int apei_resources_request(struct apei_resources *resources,
r = request_region(res->start, res->end - res->start, desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request [io %#06llx-%#06llx] for %s registers\n",
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
(unsigned long long)res->start,
- (unsigned long long)res->end - 1, desc);
+ (unsigned long long)res->end);
res_bak = res;
goto err_unmap_ioport;
}
@@ -536,8 +500,6 @@ int apei_resources_request(struct apei_resources *resources,
break;
release_mem_region(res->start, res->end - res->start);
}
-res_fini:
- apei_resources_fini(&nvs_resources);
return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -591,96 +553,6 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
return 0;
}
-/* read GAR in interrupt (including NMI) or process context */
-int apei_read(u64 *val, struct acpi_generic_address *reg)
-{
- int rc;
- u64 address;
- u32 tmp, width = reg->bit_width;
- acpi_status status;
-
- rc = apei_check_gar(reg, &address);
- if (rc)
- return rc;
-
- if (width == 64)
- width = 32; /* Break into two 32-bit transfers */
-
- *val = 0;
- switch(reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- status = acpi_os_read_memory((acpi_physical_address)
- address, &tmp, width);
- if (ACPI_FAILURE(status))
- return -EIO;
- *val = tmp;
-
- if (reg->bit_width == 64) {
- /* Read the top 32 bits */
- status = acpi_os_read_memory((acpi_physical_address)
- (address + 4), &tmp, 32);
- if (ACPI_FAILURE(status))
- return -EIO;
- *val |= ((u64)tmp << 32);
- }
- break;
- case ACPI_ADR_SPACE_SYSTEM_IO:
- status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
- if (ACPI_FAILURE(status))
- return -EIO;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(apei_read);
-
-/* write GAR in interrupt (including NMI) or process context */
-int apei_write(u64 val, struct acpi_generic_address *reg)
-{
- int rc;
- u64 address;
- u32 width = reg->bit_width;
- acpi_status status;
-
- rc = apei_check_gar(reg, &address);
- if (rc)
- return rc;
-
- if (width == 64)
- width = 32; /* Break into two 32-bit transfers */
-
- switch (reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- status = acpi_os_write_memory((acpi_physical_address)
- address, ACPI_LODWORD(val),
- width);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (reg->bit_width == 64) {
- status = acpi_os_write_memory((acpi_physical_address)
- (address + 4),
- ACPI_HIDWORD(val), 32);
- if (ACPI_FAILURE(status))
- return -EIO;
- }
- break;
- case ACPI_ADR_SPACE_SYSTEM_IO:
- status = acpi_os_write_port(address, val, reg->bit_width);
- if (ACPI_FAILURE(status))
- return -EIO;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(apei_write);
-
static int collect_res_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
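apei_resources_add(), dropped in the hunk above, only routed one extra (start, size) range into either the iomem or the ioport list of an apei_resources set before the whole set is requested. A rough sketch of the call pattern; the address and the description string are placeholders, not taken from any real table:

#include "apei-internal.h"

static int example_request(void)
{
	struct apei_resources res;
	int rc;

	apei_resources_init(&res);
	/* ... collect the GAR-backed ranges of an APEI table ... */
	rc = apei_resources_add(&res, 0xfed40000, 0x1000, true);  /* iomem */
	if (!rc)
		rc = apei_resources_request(&res, "example APEI user");
	apei_resources_fini(&res);
	return rc;
}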
diff --git a/trunk/drivers/acpi/apei/apei-internal.h b/trunk/drivers/acpi/apei/apei-internal.h
index cca240a33038..f57050e7a5e7 100644
--- a/trunk/drivers/acpi/apei/apei-internal.h
+++ b/trunk/drivers/acpi/apei/apei-internal.h
@@ -68,9 +68,6 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
-int apei_read(u64 *val, struct acpi_generic_address *reg);
-int apei_write(u64 val, struct acpi_generic_address *reg);
-
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -98,9 +95,6 @@ static inline void apei_resources_init(struct apei_resources *resources)
}
void apei_resources_fini(struct apei_resources *resources);
-int apei_resources_add(struct apei_resources *resources,
- unsigned long start, unsigned long size,
- bool iomem);
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2);
int apei_resources_request(struct apei_resources *resources,
diff --git a/trunk/drivers/acpi/apei/einj.c b/trunk/drivers/acpi/apei/einj.c
index 5b898d4dda99..589b96c38704 100644
--- a/trunk/drivers/acpi/apei/einj.c
+++ b/trunk/drivers/acpi/apei/einj.c
@@ -42,42 +42,6 @@
/* Firmware should respond within 1 milliseconds */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
-/*
- * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
- */
-static int acpi5;
-
-struct set_error_type_with_address {
- u32 type;
- u32 vendor_extension;
- u32 flags;
- u32 apicid;
- u64 memory_address;
- u64 memory_address_range;
- u32 pcie_sbdf;
-};
-enum {
- SETWA_FLAGS_APICID = 1,
- SETWA_FLAGS_MEM = 2,
- SETWA_FLAGS_PCIE_SBDF = 4,
-};
-
-/*
- * Vendor extensions for platform specific operations
- */
-struct vendor_error_type_extension {
- u32 length;
- u32 pcie_sbdf;
- u16 vendor_id;
- u16 device_id;
- u8 rev_id;
- u8 reserved[3];
-};
-
-static u32 vendor_flags;
-static struct debugfs_blob_wrapper vendor_blob;
-static char vendor_dev[64];
-
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -139,14 +103,7 @@ static struct apei_exec_ins_type einj_ins_type[] = {
*/
static DEFINE_MUTEX(einj_mutex);
-static void *einj_param;
-
-#ifndef readq
-static inline __u64 readq(volatile void __iomem *addr)
-{
- return ((__u64)readl(addr+4) << 32) + readl(addr);
-}
-#endif
+static struct einj_parameter *einj_param;
#ifndef writeq
static inline void writeq(__u64 val, volatile void __iomem *addr)
@@ -201,31 +158,10 @@ static int einj_timedout(u64 *t)
return 0;
}
-static void check_vendor_extension(u64 paddr,
- struct set_error_type_with_address *v5param)
-{
- int offset = readl(&v5param->vendor_extension);
- struct vendor_error_type_extension *v;
- u32 sbdf;
-
- if (!offset)
- return;
- v = ioremap(paddr + offset, sizeof(*v));
- if (!v)
- return;
- sbdf = readl(&v->pcie_sbdf);
- sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
- sbdf >> 24, (sbdf >> 16) & 0xff,
- (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
- readw(&v->vendor_id), readw(&v->device_id),
- readb(&v->rev_id));
- iounmap(v);
-}
-
-static void *einj_get_parameter_address(void)
+static u64 einj_get_parameter_address(void)
{
int i;
- u64 paddrv4 = 0, paddrv5 = 0;
+ u64 paddr = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -234,40 +170,12 @@ static void *einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv4, &entry->register_region.address,
- sizeof(paddrv4));
- if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
- entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
- entry->register_region.space_id ==
- ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddrv5, &entry->register_region.address,
- sizeof(paddrv5));
+ memcpy(&paddr, &entry->register_region.address,
+ sizeof(paddr));
entry++;
}
- if (paddrv5) {
- struct set_error_type_with_address *v5param;
-
- v5param = ioremap(paddrv5, sizeof(*v5param));
- if (v5param) {
- acpi5 = 1;
- check_vendor_extension(paddrv5, v5param);
- return v5param;
- }
- }
- if (paddrv4) {
- struct einj_parameter *v4param;
-
- v4param = ioremap(paddrv4, sizeof(*v4param));
- if (!v4param)
- return 0;
- if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) {
- iounmap(v4param);
- return 0;
- }
- return v4param;
- }
- return 0;
+ return paddr;
}
/* do sanity check to trigger table */
@@ -286,29 +194,8 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
return 0;
}
-static struct acpi_generic_address *einj_get_trigger_parameter_region(
- struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
-{
- int i;
- struct acpi_whea_header *entry;
-
- entry = (struct acpi_whea_header *)
- ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
- for (i = 0; i < trigger_tab->entry_count; i++) {
- if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
- entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
- entry->register_region.space_id ==
- ACPI_ADR_SPACE_SYSTEM_MEMORY &&
- (entry->register_region.address & param2) == (param1 & param2))
- return &entry->register_region;
- entry++;
- }
-
- return NULL;
-}
/* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr, u32 type,
- u64 param1, u64 param2)
+static int __einj_error_trigger(u64 trigger_paddr)
{
struct acpi_einj_trigger *trigger_tab = NULL;
struct apei_exec_context trigger_ctx;
@@ -317,16 +204,14 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
struct resource *r;
u32 table_size;
int rc = -EIO;
- struct acpi_generic_address *trigger_param_region = NULL;
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
- "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
(unsigned long long)trigger_paddr,
- (unsigned long long)trigger_paddr +
- sizeof(*trigger_tab) - 1);
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
goto out;
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -347,9 +232,9 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
-"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
- (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
- (unsigned long long)trigger_paddr + table_size - 1);
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
goto out_rel_header;
}
iounmap(trigger_tab);
@@ -370,30 +255,6 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
rc = apei_resources_sub(&trigger_resources, &einj_resources);
if (rc)
goto out_fini;
- /*
- * Some firmware will access target address specified in
- * param1 to trigger the error when injecting memory error.
- * This will cause resource conflict with regular memory. So
- * remove it from trigger table resources.
- */
- if (param_extension && (type & 0x0038) && param2) {
- struct apei_resources addr_resources;
- apei_resources_init(&addr_resources);
- trigger_param_region = einj_get_trigger_parameter_region(
- trigger_tab, param1, param2);
- if (trigger_param_region) {
- rc = apei_resources_add(&addr_resources,
- trigger_param_region->address,
- trigger_param_region->bit_width/8, true);
- if (rc)
- goto out_fini;
- rc = apei_resources_sub(&trigger_resources,
- &addr_resources);
- }
- apei_resources_fini(&addr_resources);
- if (rc)
- goto out_fini;
- }
rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
if (rc)
goto out_fini;
@@ -432,56 +293,12 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
- if (acpi5) {
- struct set_error_type_with_address *v5param = einj_param;
-
- writel(type, &v5param->type);
- if (type & 0x80000000) {
- switch (vendor_flags) {
- case SETWA_FLAGS_APICID:
- writel(param1, &v5param->apicid);
- break;
- case SETWA_FLAGS_MEM:
- writeq(param1, &v5param->memory_address);
- writeq(param2, &v5param->memory_address_range);
- break;
- case SETWA_FLAGS_PCIE_SBDF:
- writel(param1, &v5param->pcie_sbdf);
- break;
- }
- writel(vendor_flags, &v5param->flags);
- } else {
- switch (type) {
- case ACPI_EINJ_PROCESSOR_CORRECTABLE:
- case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
- case ACPI_EINJ_PROCESSOR_FATAL:
- writel(param1, &v5param->apicid);
- writel(SETWA_FLAGS_APICID, &v5param->flags);
- break;
- case ACPI_EINJ_MEMORY_CORRECTABLE:
- case ACPI_EINJ_MEMORY_UNCORRECTABLE:
- case ACPI_EINJ_MEMORY_FATAL:
- writeq(param1, &v5param->memory_address);
- writeq(param2, &v5param->memory_address_range);
- writel(SETWA_FLAGS_MEM, &v5param->flags);
- break;
- case ACPI_EINJ_PCIX_CORRECTABLE:
- case ACPI_EINJ_PCIX_UNCORRECTABLE:
- case ACPI_EINJ_PCIX_FATAL:
- writel(param1, &v5param->pcie_sbdf);
- writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags);
- break;
- }
- }
- } else {
- rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
- if (rc)
- return rc;
- if (einj_param) {
- struct einj_parameter *v4param = einj_param;
- writeq(param1, &v4param->param1);
- writeq(param2, &v4param->param2);
- }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ writeq(param1, &einj_param->param1);
+ writeq(param2, &einj_param->param2);
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
if (rc)
@@ -507,7 +324,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
- rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
+ rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -591,25 +408,15 @@ static int error_type_set(void *data, u64 val)
{
int rc;
u32 available_error_type = 0;
- u32 tval, vendor;
-
- /*
- * Vendor defined types have 0x80000000 bit set, and
- * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
- */
- vendor = val & 0x80000000;
- tval = val & 0x7fffffff;
/* Only one error type can be specified */
- if (tval & (tval - 1))
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(val & available_error_type))
- return -EINVAL;
- }
error_type = val;
return 0;
@@ -648,6 +455,7 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
static int __init einj_init(void)
{
int rc;
+ u64 param_paddr;
acpi_status status;
struct dentry *fentry;
struct apei_exec_context ctx;
@@ -657,9 +465,10 @@ static int __init einj_init(void)
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
return -ENODEV;
- else if (ACPI_FAILURE(status)) {
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
return -EINVAL;
@@ -700,30 +509,23 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
-
- einj_param = einj_get_parameter_address();
- if ((param_extension || acpi5) && einj_param) {
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_unmap;
- }
-
- if (vendor_dev[0]) {
- vendor_blob.data = vendor_dev;
- vendor_blob.size = strlen(vendor_dev);
- fentry = debugfs_create_blob("vendor", S_IRUSR,
- einj_debug_dir, &vendor_blob);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
- einj_debug_dir, &vendor_flags);
- if (!fentry)
- goto err_unmap;
+ if (param_extension) {
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ } else
+ pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
diff --git a/trunk/drivers/acpi/apei/erst.c b/trunk/drivers/acpi/apei/erst.c
index eb9fab5b96e4..6a9e3bad13f4 100644
--- a/trunk/drivers/acpi/apei/erst.c
+++ b/trunk/drivers/acpi/apei/erst.c
@@ -1127,9 +1127,10 @@ static int __init erst_init(void)
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ pr_info(ERST_PFX "Table is not found!\n");
goto err;
- else if (ACPI_FAILURE(status)) {
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(ERST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/trunk/drivers/acpi/apei/ghes.c b/trunk/drivers/acpi/apei/ghes.c
index 9b3cac0abecc..ebaf037a787b 100644
--- a/trunk/drivers/acpi/apei/ghes.c
+++ b/trunk/drivers/acpi/apei/ghes.c
@@ -33,7 +33,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -46,9 +45,8 @@
#include
#include
#include
-#include
-#include
#include
+#include
#include
#include
#include
@@ -301,7 +299,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_os_map_generic_address(&generic->error_status_address);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -321,7 +319,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_os_unmap_generic_address(&generic->error_status_address);
+ acpi_post_unmap_gar(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -330,7 +328,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
}
enum {
@@ -401,7 +399,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
u32 len;
int rc;
- rc = apei_read(&buf_paddr, &g->error_status_address);
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
@@ -478,27 +476,6 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
}
#endif
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
- else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
- CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err;
- pcie_err = (struct cper_sec_pcie *)(gdata+1);
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(sev);
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity);
- }
-
- }
-#endif
}
}
@@ -506,22 +483,16 @@ static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
const struct acpi_hest_generic_status *estatus)
{
- static atomic_t seqno;
- unsigned int curr_seqno;
- char pfx_seq[64];
-
if (pfx == NULL) {
if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
- pfx = KERN_WARNING;
+ pfx = KERN_WARNING HW_ERR;
else
- pfx = KERN_ERR;
+ pfx = KERN_ERR HW_ERR;
}
- curr_seqno = atomic_inc_return(&seqno);
- snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx_seq, generic->header.source_id);
- apei_estatus_print(pfx_seq, estatus);
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
}
static int ghes_print_estatus(const char *pfx,
@@ -740,34 +711,26 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
-static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
-{
- struct llist_node *next, *tail = NULL;
-
- while (llnode) {
- next = llnode->next;
- llnode->next = tail;
- tail = llnode;
- llnode = next;
- }
-
- return tail;
-}
-
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
- struct llist_node *llnode, *next;
+ struct llist_node *llnode, *next, *tail = NULL;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
u32 len, node_len;
- llnode = llist_del_all(&ghes_estatus_llist);
/*
* Because the time order of estatus in list is reversed,
* revert it back to proper order.
*/
- llnode = llist_nodes_reverse(llnode);
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
while (llnode) {
next = llnode->next;
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -787,32 +750,6 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
}
}
-static void ghes_print_queued_estatus(void)
-{
- struct llist_node *llnode;
- struct ghes_estatus_node *estatus_node;
- struct acpi_hest_generic *generic;
- struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
-
- llnode = llist_del_all(&ghes_estatus_llist);
- /*
- * Because the time order of estatus in list is reversed,
- * revert it back to proper order.
- */
- llnode = llist_nodes_reverse(llnode);
- while (llnode) {
- estatus_node = llist_entry(llnode, struct ghes_estatus_node,
- llnode);
- estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = apei_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
- generic = estatus_node->generic;
- ghes_print_estatus(NULL, generic, estatus);
- llnode = llnode->next;
- }
-}
-
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
@@ -838,8 +775,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_queued_estatus();
- __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
+ __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
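ghes_proc_in_irq() above drains ghes_estatus_llist and reverses it because the NMI-side producer pushes each node onto the lock-less list head-first. That producer half is outside this hunk; a hedged sketch of what it is assumed to look like (the node, list, and irq_work names are taken from the consumer code above or assumed):

	/* NMI context: queue the saved status block, defer processing */
	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
	irq_work_queue(&ghes_proc_irq_work);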
diff --git a/trunk/drivers/acpi/apei/hest.c b/trunk/drivers/acpi/apei/hest.c
index 7f00cf38098f..ee7fddc4665c 100644
--- a/trunk/drivers/acpi/apei/hest.c
+++ b/trunk/drivers/acpi/apei/hest.c
@@ -221,9 +221,10 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table not found.\n");
goto err;
- else if (ACPI_FAILURE(status)) {
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/trunk/drivers/acpi/atomicio.c b/trunk/drivers/acpi/atomicio.c
index d4a5b3d3657b..cfc0cc10af39 100644
--- a/trunk/drivers/acpi/atomicio.c
+++ b/trunk/drivers/acpi/atomicio.c
@@ -32,8 +32,6 @@
#include
#include
#include
-#include
-#include
#include
#define ACPI_PFX "ACPI: "
@@ -99,37 +97,6 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
return NULL;
}
-#ifndef CONFIG_IA64
-#define should_use_kmap(pfn) page_is_ram(pfn)
-#else
-/* ioremap will take care of cache attributes */
-#define should_use_kmap(pfn) 0
-#endif
-
-static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
-{
- unsigned long pfn;
-
- pfn = pg_off >> PAGE_SHIFT;
- if (should_use_kmap(pfn)) {
- if (pg_sz > PAGE_SIZE)
- return NULL;
- return (void __iomem __force *)kmap(pfn_to_page(pfn));
- } else
- return ioremap(pg_off, pg_sz);
-}
-
-static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
-{
- unsigned long pfn;
-
- pfn = pg_off >> PAGE_SHIFT;
- if (page_is_ram(pfn))
- kunmap(pfn_to_page(pfn));
- else
- iounmap(vaddr);
-}
-
/*
* Used to pre-map the specified IO memory area. First try to find
* whether the area is already pre-mapped, if it is, increase the
@@ -152,7 +119,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
pg_off = paddr & PAGE_MASK;
pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
- vaddr = acpi_map(pg_off, pg_sz);
+ vaddr = ioremap(pg_off, pg_sz);
if (!vaddr)
return NULL;
map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -168,7 +135,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
vaddr = __acpi_try_ioremap(paddr, size);
if (vaddr) {
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- acpi_unmap(pg_off, map->vaddr);
+ iounmap(map->vaddr);
kfree(map);
return vaddr;
}
@@ -177,7 +144,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
return map->vaddr + (paddr - map->paddr);
err_unmap:
- acpi_unmap(pg_off, vaddr);
+ iounmap(vaddr);
return NULL;
}
@@ -210,7 +177,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
return;
synchronize_rcu();
- acpi_unmap(map->paddr, map->vaddr);
+ iounmap(map->vaddr);
kfree(map);
}
@@ -293,21 +260,6 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
- return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
- u64 l, h;
- l = readl(addr);
- h = readl(addr+4);
- return l | (h << 32);
-}
-#endif
-
/*
* Can be used in atomic (including NMI) or process context. RCU read
* lock can only be released after the IO memory area accessing.
@@ -328,9 +280,11 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
case 32:
*val = readl(addr);
break;
+#ifdef readq
case 64:
- *val = read64(addr);
+ *val = readq(addr);
break;
+#endif
default:
return -EINVAL;
}
@@ -339,19 +293,6 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
return 0;
}
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
- writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
- writel(val, addr);
- writel(val>>32, addr+4);
-}
-#endif
-
static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
void __iomem *addr;
@@ -368,9 +309,11 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
case 32:
writel(val, addr);
break;
+#ifdef writeq
case 64:
- write64(val, addr);
+ writeq(val, addr);
break;
+#endif
default:
return -EINVAL;
}
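The atomicio.c hunk above goes back to compiling the 64-bit case only when a native readq/writeq exists, while the removed read64()/write64() helpers fell back to two 32-bit accesses on 32-bit platforms. A minimal sketch of that fallback pattern, with an illustrative name:

#include <linux/io.h>
#include <linux/types.h>

#ifdef readq
#define my_read64(addr)		readq(addr)
#else
static inline u64 my_read64(const volatile void __iomem *addr)
{
	u64 lo = readl(addr);	/* low half first, as in the removed helper */
	u64 hi = readl(addr + 4);

	return lo | (hi << 32);
}
#endif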
diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c
index e56f3be7b07d..3b5c3189fd99 100644
--- a/trunk/drivers/acpi/numa.c
+++ b/trunk/drivers/acpi/numa.c
@@ -45,8 +45,6 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
-unsigned char acpi_srat_revision __initdata;
-
int pxm_to_node(int pxm)
{
if (pxm < 0)
@@ -257,13 +255,9 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
- struct acpi_table_srat *srat;
if (!table)
return -EINVAL;
- srat = (struct acpi_table_srat *)table;
- acpi_srat_revision = srat->header.revision;
-
/* Real work done in acpi_table_parse_srat below. */
return 0;
diff --git a/trunk/drivers/acpi/nvs.c b/trunk/drivers/acpi/nvs.c
index 7a2035fa8c71..096787b43c96 100644
--- a/trunk/drivers/acpi/nvs.c
+++ b/trunk/drivers/acpi/nvs.c
@@ -15,56 +15,6 @@
#include
#include
-/* ACPI NVS regions, APEI may use it */
-
-struct nvs_region {
- __u64 phys_start;
- __u64 size;
- struct list_head node;
-};
-
-static LIST_HEAD(nvs_region_list);
-
-#ifdef CONFIG_ACPI_SLEEP
-static int suspend_nvs_register(unsigned long start, unsigned long size);
-#else
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
-{
- return 0;
-}
-#endif
-
-int acpi_nvs_register(__u64 start, __u64 size)
-{
- struct nvs_region *region;
-
- region = kmalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return -ENOMEM;
- region->phys_start = start;
- region->size = size;
- list_add_tail(&region->node, &nvs_region_list);
-
- return suspend_nvs_register(start, size);
-}
-
-int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
- void *data)
-{
- int rc;
- struct nvs_region *region;
-
- list_for_each_entry(region, &nvs_region_list, node) {
- rc = func(region->phys_start, region->size, data);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-
-#ifdef CONFIG_ACPI_SLEEP
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* suspend and to restore the contents of this memory during the subsequent
@@ -91,7 +41,7 @@ static LIST_HEAD(nvs_list);
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
-static int suspend_nvs_register(unsigned long start, unsigned long size)
+int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
@@ -209,4 +159,3 @@ void suspend_nvs_restore(void)
if (entry->data)
memcpy(entry->kaddr, entry->data, entry->size);
}
-#endif
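acpi_nvs_for_each_region(), deleted above, walked every registered NVS region and passed each (start, size) pair to a callback; that is how APEI excluded NVS ranges from its resource requests. A small sketch of a caller, with illustrative names:

static int count_nvs_bytes(__u64 start, __u64 size, void *data)
{
	*(u64 *)data += size;
	return 0;	/* non-zero would abort the walk */
}

static u64 total_nvs_bytes(void)
{
	u64 total = 0;

	acpi_nvs_for_each_region(count_nvs_bytes, &total);
	return total;
}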
diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c
index fcc12d842bcc..f31c5c5f1b7e 100644
--- a/trunk/drivers/acpi/osl.c
+++ b/trunk/drivers/acpi/osl.c
@@ -83,6 +83,19 @@ static struct workqueue_struct *kacpi_notify_wq;
struct workqueue_struct *kacpi_hotplug_wq;
EXPORT_SYMBOL(kacpi_hotplug_wq);
+struct acpi_res_list {
+ resource_size_t start;
+ resource_size_t end;
+ acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
+ char name[5]; /* only can have a length of 4 chars, make use of this
+ one instead of res->name, no need to kalloc then */
+ struct list_head resource_list;
+ int count;
+};
+
+static LIST_HEAD(resource_list_head);
+static DEFINE_SPINLOCK(acpi_res_lock);
+
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
@@ -153,21 +166,17 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
return supported;
}
-static void __init acpi_request_region (struct acpi_generic_address *gas,
+static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
{
- u64 addr;
-
- /* Handle possible alignment issues */
- memcpy(&addr, &gas->address, sizeof(addr));
- if (!addr || !length)
+ if (!addr->address || !length)
return;
/* Resources are never freed */
- if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr, length, desc);
- else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr, length, desc);
+ if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr->address, length, desc);
+ else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr->address, length, desc);
}
static int __init acpi_reserve_resources(void)
@@ -418,42 +427,35 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
-int acpi_os_map_generic_address(struct acpi_generic_address *gas)
+static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
{
- u64 addr;
void __iomem *virt;
- if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
- /* Handle possible alignment issues */
- memcpy(&addr, &gas->address, sizeof(addr));
- if (!addr || !gas->bit_width)
+ if (!addr->address || !addr->bit_width)
return -EINVAL;
- virt = acpi_os_map_memory(addr, gas->bit_width / 8);
+ virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
if (!virt)
return -EIO;
return 0;
}
-EXPORT_SYMBOL(acpi_os_map_generic_address);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
+static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
{
- u64 addr;
struct acpi_ioremap *map;
- if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
- /* Handle possible alignment issues */
- memcpy(&addr, &gas->address, sizeof(addr));
- if (!addr || !gas->bit_width)
+ if (!addr->address || !addr->bit_width)
return;
mutex_lock(&acpi_ioremap_lock);
- map = acpi_map_lookup(addr, gas->bit_width / 8);
+ map = acpi_map_lookup(addr->address, addr->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
@@ -463,7 +465,6 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
acpi_os_map_cleanup(map);
}
-EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
@@ -1277,28 +1278,44 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
* drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
- acpi_adr_space_type space_id;
- acpi_size length;
- u8 warn = 0;
- int clash = 0;
+ struct acpi_res_list *res_list_elem;
+ int ioport = 0, clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
return 0;
- if (res->flags & IORESOURCE_IO)
- space_id = ACPI_ADR_SPACE_SYSTEM_IO;
- else
- space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
+ ioport = res->flags & IORESOURCE_IO;
+
+ spin_lock(&acpi_res_lock);
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+ if (ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_IO))
+ continue;
+ if (!ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_MEMORY))
+ continue;
- length = res->end - res->start + 1;
- if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
- warn = 1;
- clash = acpi_check_address_range(space_id, res->start, length, warn);
+ if (res->end < res_list_elem->start
+ || res_list_elem->end < res->start)
+ continue;
+ clash = 1;
+ break;
+ }
+ spin_unlock(&acpi_res_lock);
if (clash) {
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
+ printk(KERN_WARNING "ACPI: resource %s %pR"
+ " conflicts with ACPI region %s "
+ "[%s 0x%zx-0x%zx]\n",
+ res->name, res, res_list_elem->name,
+ (res_list_elem->resource_type ==
+ ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
+ (size_t) res_list_elem->start,
+ (size_t) res_list_elem->end);
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
printk(KERN_NOTICE "ACPI: This conflict may"
" cause random problems and system"
@@ -1450,6 +1467,155 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
kmem_cache_free(cache, object);
return (AE_OK);
}
+
+static inline int acpi_res_list_add(struct acpi_res_list *res)
+{
+ struct acpi_res_list *res_list_elem;
+
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+
+ if (res->resource_type == res_list_elem->resource_type &&
+ res->start == res_list_elem->start &&
+ res->end == res_list_elem->end) {
+
+ /*
+ * The Region(addr,len) already exist in the list,
+ * just increase the count
+ */
+
+ res_list_elem->count++;
+ return 0;
+ }
+ }
+
+ res->count = 1;
+ list_add(&res->resource_list, &resource_list_head);
+ return 1;
+}
+
+static inline void acpi_res_list_del(struct acpi_res_list *res)
+{
+ struct acpi_res_list *res_list_elem;
+
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+
+ if (res->resource_type == res_list_elem->resource_type &&
+ res->start == res_list_elem->start &&
+ res->end == res_list_elem->end) {
+
+ /*
+ * If the res count is decreased to 0,
+ * remove and free it
+ */
+
+ if (--res_list_elem->count == 0) {
+ list_del(&res_list_elem->resource_list);
+ kfree(res_list_elem);
+ }
+ return;
+ }
+ }
+}
+
+acpi_status
+acpi_os_invalidate_address(
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length)
+{
+ struct acpi_res_list res;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ /* Only interference checks against SystemIO and SystemMemory
+ are needed */
+ res.start = address;
+ res.end = address + length - 1;
+ res.resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ acpi_res_list_del(&res);
+ spin_unlock(&acpi_res_lock);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_address
+ *
+ * PARAMETERS: space_id - ACPI space ID
+ * address - Physical address
+ * length - Address length
+ *
+ * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
+ * should return AE_AML_ILLEGAL_ADDRESS.
+ *
+ * DESCRIPTION: Validate a system address via the host OS. Used to validate
+ * the addresses accessed by AML operation regions.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_address (
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length,
+ char *name)
+{
+ struct acpi_res_list *res;
+ int added;
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return AE_OK;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ /* Only interference checks against SystemIO and SystemMemory
+ are needed */
+ res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
+ if (!res)
+ return AE_OK;
+ /* ACPI names are fixed to 4 bytes, still better use strlcpy */
+ strlcpy(res->name, name, 5);
+ res->start = address;
+ res->end = address + length - 1;
+ res->resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ added = acpi_res_list_add(res);
+ spin_unlock(&acpi_res_lock);
+ pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
+ "name: %s\n", added ? "Added" : "Already exist",
+ (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ ? "SystemIO" : "System Memory",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ res->name);
+ if (!added)
+ kfree(res);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
+}
#endif
acpi_status __init acpi_os_initialize(void)
diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c
index c850de4c9a14..3a0428e8435c 100644
--- a/trunk/drivers/acpi/processor_core.c
+++ b/trunk/drivers/acpi/processor_core.c
@@ -173,30 +173,8 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
- if (apic_id == -1) {
- /*
- * On UP processor, there is no _MAT or MADT table.
- * So above apic_id is always set to -1.
- *
- * BIOS may define multiple CPU handles even for UP processor.
- * For example,
- *
- * Scope (_PR)
- * {
- * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
- * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
- * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
- * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
- * }
- *
- * Ignores apic_id and always return 0 for CPU0's handle.
- * Return -1 for other CPU's handle.
- */
- if (acpi_id == 0)
- return acpi_id;
- else
- return apic_id;
- }
+ if (apic_id == -1)
+ return apic_id;
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c
index 0034ede38710..20a68ca386de 100644
--- a/trunk/drivers/acpi/processor_driver.c
+++ b/trunk/drivers/acpi/processor_driver.c
@@ -82,7 +82,7 @@ MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -324,8 +324,10 @@ static int acpi_processor_get_info(struct acpi_device *device)
* they are physically not present.
*/
if (pr->id == -1) {
- if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
+ if (ACPI_FAILURE
+ (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
return -ENODEV;
+ }
}
/*
* On some boxes several processors use the same processor bus id.
@@ -537,7 +539,6 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
acpi_processor_power_exit(pr, device);
- sysfs_remove_link(&device->dev.kobj, "sysdev");
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -719,19 +720,18 @@ processor_walk_namespace_cb(acpi_handle handle,
return (AE_OK);
}
-static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
- acpi_handle handle = pr->handle;
if (!is_processor_present(handle)) {
return AE_ERROR;
}
- if (acpi_map_lsapic(handle, &pr->id))
+ if (acpi_map_lsapic(handle, p_cpu))
return AE_ERROR;
- if (arch_register_cpu(pr->id)) {
- acpi_unmap_lsapic(pr->id);
+ if (arch_register_cpu(*p_cpu)) {
+ acpi_unmap_lsapic(*p_cpu);
return AE_ERROR;
}
@@ -748,7 +748,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
return (0);
}
#else
-static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
+static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
return AE_ERROR;
}
@@ -827,6 +827,8 @@ static void __exit acpi_processor_exit(void)
acpi_bus_unregister_driver(&acpi_processor_driver);
+ cpuidle_unregister_driver(&acpi_idle_driver);
+
return;
}
diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c
index fdf27b9fce43..69ac373c72ab 100644
--- a/trunk/drivers/ata/ata_piix.c
+++ b/trunk/drivers/ata/ata_piix.c
@@ -1116,13 +1116,6 @@ static int piix_broken_suspend(void)
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
},
},
- {
- .ident = "Satellite Pro A120",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
- },
- },
{
.ident = "Portege M500",
.matches = {
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index c06e0ec11556..11c9aea4f4f7 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -4125,8 +4125,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
diff --git a/trunk/drivers/ata/libata-transport.c b/trunk/drivers/ata/libata-transport.c
index 74aaee30e264..9a7f0ea565df 100644
--- a/trunk/drivers/ata/libata-transport.c
+++ b/trunk/drivers/ata/libata-transport.c
@@ -291,7 +291,6 @@ int ata_tport_add(struct device *parent,
goto tport_err;
}
- device_enable_async_suspend(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
diff --git a/trunk/drivers/ata/pata_bf54x.c b/trunk/drivers/ata/pata_bf54x.c
index 1e65842e2ca7..d6a4677fdf71 100644
--- a/trunk/drivers/ata/pata_bf54x.c
+++ b/trunk/drivers/ata/pata_bf54x.c
@@ -251,8 +251,6 @@ static const u32 udma_tenvmin = 20;
static const u32 udma_tackmin = 20;
static const u32 udma_tssmin = 50;
-#define BFIN_MAX_SG_SEGMENTS 4
-
/**
*
* Function: num_clocks_min
@@ -831,61 +829,79 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
{
- struct ata_port *ap = qc->ap;
- struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
+ unsigned short config = WDSIZE_16;
struct scatterlist *sg;
unsigned int si;
- unsigned int channel;
- unsigned int dir;
- unsigned int size = 0;
dev_dbg(qc->ap->dev, "in atapi dma setup\n");
/* Program the ATA_CTRL register with dir */
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- channel = CH_ATAPI_TX;
- dir = DMA_TO_DEVICE;
+ /* fill the ATAPI DMA controller */
+ set_dma_config(CH_ATAPI_TX, config);
+ set_dma_x_modify(CH_ATAPI_TX, 2);
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
+ set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
+ }
} else {
- channel = CH_ATAPI_RX;
- dir = DMA_FROM_DEVICE;
config |= WNR;
+ /* fill the ATAPI DMA controller */
+ set_dma_config(CH_ATAPI_RX, config);
+ set_dma_x_modify(CH_ATAPI_RX, 2);
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
+ set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
+ }
}
+}
- dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
-
- /* fill the ATAPI DMA controller */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- dma_desc_cpu[si].start_addr = sg_dma_address(sg);
- dma_desc_cpu[si].cfg = config;
- dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
- dma_desc_cpu[si].x_modify = 2;
- size += sg_dma_len(sg);
- }
-
- /* Set the last descriptor to stop mode */
- dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
- flush_dcache_range((unsigned int)dma_desc_cpu,
- (unsigned int)dma_desc_cpu +
- qc->n_elem * sizeof(struct dma_desc_array));
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ struct scatterlist *sg;
+ unsigned int si;
- /* Enable ATA DMA operation*/
- set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
- set_dma_x_count(channel, 0);
- set_dma_x_modify(channel, 0);
- set_dma_config(channel, config);
+ dev_dbg(qc->ap->dev, "in atapi dma start\n");
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
- SSYNC();
+ /* start ATAPI DMA controller*/
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ /*
+ * On blackfin arch, uncacheable memory is not
+ * allocated with flag GFP_DMA. DMA buffer from
+ * common kernel code should be flushed if WB
+ * data cache is enabled. Otherwise, this loop
+ * is an empty loop and optimized out.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ flush_dcache_range(sg_dma_address(sg),
+ sg_dma_address(sg) + sg_dma_len(sg));
+ }
+ enable_dma(CH_ATAPI_TX);
+ dev_dbg(qc->ap->dev, "enable udma write\n");
- /* Send ATA DMA command */
- bfin_exec_command(ap, &qc->tf);
+ /* Send ATA DMA write command */
+ bfin_exec_command(ap, &qc->tf);
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* set ATA DMA write direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
| XFER_DIR));
} else {
+ enable_dma(CH_ATAPI_RX);
+ dev_dbg(qc->ap->dev, "enable udma read\n");
+
+ /* Send ATA DMA read command */
+ bfin_exec_command(ap, &qc->tf);
+
/* set ATA DMA read direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
& ~XFER_DIR));
@@ -897,28 +913,12 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
/* Set ATAPI state machine control in terminate sequence */
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
- /* Set transfer length to the total size of sg buffers */
- ATAPI_SET_XFER_LEN(base, size >> 1);
-}
-
-/**
- * bfin_bmdma_start - Start an IDE DMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
-
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-
- dev_dbg(qc->ap->dev, "in atapi dma start\n");
-
- if (!(ap->udma_mask || ap->mwdma_mask))
- return;
+ /* Set transfer length to buffer len */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
+ }
- /* start ATAPI transfer*/
+ /* Enable ATA DMA operation*/
if (ap->udma_mask)
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
| ULTRA_START);
@@ -935,23 +935,34 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- unsigned int dir;
+ struct scatterlist *sg;
+ unsigned int si;
dev_dbg(qc->ap->dev, "in atapi dma stop\n");
-
if (!(ap->udma_mask || ap->mwdma_mask))
return;
/* stop ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- dir = DMA_TO_DEVICE;
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
disable_dma(CH_ATAPI_TX);
- } else {
- dir = DMA_FROM_DEVICE;
+ else {
disable_dma(CH_ATAPI_RX);
+ if (ap->hsm_task_state & HSM_ST_LAST) {
+ /*
+ * On blackfin arch, uncacheable memory is not
+ * allocated with flag GFP_DMA. DMA buffer from
+ * common kernel code should be invalidated if
+ * data cache is enabled. Otherwise, this loop
+ * is an empty loop and optimized out.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ invalidate_dcache_range(
+ sg_dma_address(sg),
+ sg_dma_address(sg)
+ + sg_dma_len(sg));
+ }
+ }
}
-
- dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
}
/**
@@ -1249,11 +1260,6 @@ static void bfin_port_stop(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi port stop\n");
if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
- dma_free_coherent(ap->dev,
- BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
- ap->bmdma_prd,
- ap->bmdma_prd_dma);
-
free_dma(CH_ATAPI_RX);
free_dma(CH_ATAPI_TX);
}
@@ -1265,29 +1271,14 @@ static int bfin_port_start(struct ata_port *ap)
if (!(ap->udma_mask || ap->mwdma_mask))
return 0;
- ap->bmdma_prd = dma_alloc_coherent(ap->dev,
- BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
- &ap->bmdma_prd_dma,
- GFP_KERNEL);
-
- if (ap->bmdma_prd == NULL) {
- dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
- goto out;
- }
-
if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
if (request_dma(CH_ATAPI_TX,
"BFIN ATAPI TX DMA") >= 0)
return 0;
free_dma(CH_ATAPI_RX);
- dma_free_coherent(ap->dev,
- BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
- ap->bmdma_prd,
- ap->bmdma_prd_dma);
}
-out:
ap->udma_mask = 0;
ap->mwdma_mask = 0;
dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1409,7 +1400,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
static struct scsi_host_template bfin_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
+ .sg_tablesize = SG_NONE,
.dma_boundary = ATA_DMA_BOUNDARY,
};
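
The comments in the hunks above explain why the write path flushes the data cache over each scatterlist entry before starting DMA and why the read path invalidates it afterwards. Below is a minimal stand-alone sketch of that cache-maintenance pattern, assuming the Blackfin flush_dcache_range()/invalidate_dcache_range() helpers used by the driver; it is illustrative only and not part of the patch.

#include <linux/scatterlist.h>
#include <asm/cacheflush.h>

/* Before the device reads from memory (an ATA write), push dirty cache
 * lines covering every DMA buffer out to memory. */
static void sketch_flush_sg_for_device(struct scatterlist *sgl, unsigned int n_elem)
{
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(sgl, sg, n_elem, si)
		flush_dcache_range(sg_dma_address(sg),
				   sg_dma_address(sg) + sg_dma_len(sg));
}

/* After the device has written to memory (an ATA read), drop stale cache
 * lines so the CPU sees the freshly DMA'd data. */
static void sketch_invalidate_sg_for_cpu(struct scatterlist *sgl, unsigned int n_elem)
{
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(sgl, sg, n_elem, si)
		invalidate_dcache_range(sg_dma_address(sg),
					sg_dma_address(sg) + sg_dma_len(sg));
}
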
diff --git a/trunk/drivers/ata/sata_fsl.c b/trunk/drivers/ata/sata_fsl.c
index 0120b0d1e9a5..5a2c95ba050a 100644
--- a/trunk/drivers/ata/sata_fsl.c
+++ b/trunk/drivers/ata/sata_fsl.c
@@ -140,7 +140,6 @@ enum {
*/
HCONTROL_ONLINE_PHY_RST = (1 << 31),
HCONTROL_FORCE_OFFLINE = (1 << 30),
- HCONTROL_LEGACY = (1 << 28),
HCONTROL_PARITY_PROT_MOD = (1 << 14),
HCONTROL_DPATH_PARITY = (1 << 12),
HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1224,10 +1223,6 @@ static int sata_fsl_init_controller(struct ata_host *host)
* part of the port_start() callback
*/
- /* sata controller to operate in enterprise mode */
- temp = ioread32(hcr_base + HCONTROL);
- iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
-
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
if (temp & 0x3F)
@@ -1426,12 +1421,6 @@ static int sata_fsl_resume(struct platform_device *op)
/* Recover the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
- iowrite32((ioread32(hcr_base + HCONTROL)
- | HCONTROL_ONLINE_PHY_RST
- | HCONTROL_SNOOP_ENABLE
- | HCONTROL_PMP_ATTACHED),
- hcr_base + HCONTROL);
-
ata_host_resume(host);
return 0;
}
diff --git a/trunk/drivers/bcma/bcma_private.h b/trunk/drivers/bcma/bcma_private.h
index 0def898a1d15..fda56bde36b8 100644
--- a/trunk/drivers/bcma/bcma_private.h
+++ b/trunk/drivers/bcma/bcma_private.h
@@ -19,7 +19,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
#ifdef CONFIG_PM
-int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
diff --git a/trunk/drivers/bcma/host_pci.c b/trunk/drivers/bcma/host_pci.c
index f59244e33971..443b83a2fd7a 100644
--- a/trunk/drivers/bcma/host_pci.c
+++ b/trunk/drivers/bcma/host_pci.c
@@ -235,32 +235,38 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static int bcma_host_pci_suspend(struct device *dev)
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct bcma_bus *bus = pci_get_drvdata(pdev);
-
- bus->mapped_core = NULL;
+ /* Host specific */
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
- return bcma_bus_suspend(bus);
+ return 0;
}
-static int bcma_host_pci_resume(struct device *dev)
+static int bcma_host_pci_resume(struct pci_dev *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct bcma_bus *bus = pci_get_drvdata(pdev);
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+ int err;
- return bcma_bus_resume(bus);
-}
+ /* Host specific */
+ pci_set_power_state(dev, 0);
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ pci_restore_state(dev);
-static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
- bcma_host_pci_resume);
-#define BCMA_PM_OPS (&bcma_pm_ops)
+ /* Bus specific */
+ err = bcma_bus_resume(bus);
+ if (err)
+ return err;
+ return 0;
+}
#else /* CONFIG_PM */
-
-#define BCMA_PM_OPS NULL
-
+# define bcma_host_pci_suspend NULL
+# define bcma_host_pci_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
@@ -278,7 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
- .driver.pm = BCMA_PM_OPS,
+ .suspend = bcma_host_pci_suspend,
+ .resume = bcma_host_pci_resume,
};
int __init bcma_host_pci_init(void)
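
This hunk moves bcma's host PCI glue from the dev_pm_ops registration style back to the legacy pci_driver .suspend/.resume hooks, where the driver performs the PCI state save/restore itself. The sketch below contrasts the two styles for a hypothetical "foo" driver; the foo_* names are placeholders, and only the pci_* and pm helpers are real kernel APIs.

#include <linux/pci.h>
#include <linux/pm.h>

/* dev_pm_ops style: the PCI core saves and restores PCI state around the
 * callbacks, so the driver only handles its own device state. */
static int foo_suspend(struct device *dev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* Legacy style, as restored above: the driver itself saves PCI config
 * space, disables the function and picks the target power state. */
static int foo_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int foo_legacy_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_restore_state(pdev);
	return 0;
}

static struct pci_driver foo_driver = {
	.name      = "foo",
	/* pick exactly one of the two styles: */
	.driver.pm = &foo_pm_ops,
	/* .suspend = foo_legacy_suspend, .resume = foo_legacy_resume, */
};
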
diff --git a/trunk/drivers/bcma/main.c b/trunk/drivers/bcma/main.c
index febbc0a1222a..10f92b371e58 100644
--- a/trunk/drivers/bcma/main.c
+++ b/trunk/drivers/bcma/main.c
@@ -241,21 +241,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
}
#ifdef CONFIG_PM
-int bcma_bus_suspend(struct bcma_bus *bus)
-{
- struct bcma_device *core;
-
- list_for_each_entry(core, &bus->cores, list) {
- struct device_driver *drv = core->dev.driver;
- if (drv) {
- struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
- if (adrv->suspend)
- adrv->suspend(core);
- }
- }
- return 0;
-}
-
int bcma_bus_resume(struct bcma_bus *bus)
{
struct bcma_device *core;
@@ -267,15 +252,6 @@ int bcma_bus_resume(struct bcma_bus *bus)
bcma_core_chipcommon_init(&bus->drv_cc);
}
- list_for_each_entry(core, &bus->cores, list) {
- struct device_driver *drv = core->dev.driver;
- if (drv) {
- struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
- if (adrv->resume)
- adrv->resume(core);
- }
- }
-
return 0;
}
#endif
diff --git a/trunk/drivers/block/Kconfig b/trunk/drivers/block/Kconfig
index 4e4c8a4a5fd3..a30aa103f95b 100644
--- a/trunk/drivers/block/Kconfig
+++ b/trunk/drivers/block/Kconfig
@@ -317,17 +317,6 @@ config BLK_DEV_NBD
If unsure, say N.
-config BLK_DEV_NVME
- tristate "NVM Express block device"
- depends on PCI
- ---help---
- The NVM Express driver is for solid state drives directly
- connected to the PCI or PCI Express bus. If you know you
- don't have one of these, it is safe to answer N.
-
- To compile this driver as a module, choose M here: the
- module will be called nvme.
-
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
diff --git a/trunk/drivers/block/Makefile b/trunk/drivers/block/Makefile
index 5b795059f8fb..ad7b74a44ef3 100644
--- a/trunk/drivers/block/Makefile
+++ b/trunk/drivers/block/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
-obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c
deleted file mode 100644
index c1dc4d86c221..000000000000
--- a/trunk/drivers/block/nvme.c
+++ /dev/null
@@ -1,1739 +0,0 @@
-/*
- * NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define NVME_Q_DEPTH 1024
-#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
-#define NVME_IO_TIMEOUT (5 * HZ)
-#define ADMIN_TIMEOUT (60 * HZ)
-
-static int nvme_major;
-module_param(nvme_major, int, 0);
-
-static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
-
-static DEFINE_SPINLOCK(dev_list_lock);
-static LIST_HEAD(dev_list);
-static struct task_struct *nvme_thread;
-
-/*
- * Represents an NVM Express device. Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
- struct list_head node;
- struct nvme_queue **queues;
- u32 __iomem *dbs;
- struct pci_dev *pci_dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
- int instance;
- int queue_count;
- int db_stride;
- u32 ctrl_config;
- struct msix_entry *entry;
- struct nvme_bar __iomem *bar;
- struct list_head namespaces;
- char serial[20];
- char model[40];
- char firmware_rev[8];
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
- struct list_head list;
-
- struct nvme_dev *dev;
- struct request_queue *queue;
- struct gendisk *disk;
-
- int ns_id;
- int lba_shift;
-};
-
-/*
- * An NVM Express queue. Each device has at least two (one for admin
- * commands and one for I/O commands).
- */
-struct nvme_queue {
- struct device *q_dmadev;
- struct nvme_dev *dev;
- spinlock_t q_lock;
- struct nvme_command *sq_cmds;
- volatile struct nvme_completion *cqes;
- dma_addr_t sq_dma_addr;
- dma_addr_t cq_dma_addr;
- wait_queue_head_t sq_full;
- wait_queue_t sq_cong_wait;
- struct bio_list sq_cong;
- u32 __iomem *q_db;
- u16 q_depth;
- u16 cq_vector;
- u16 sq_head;
- u16 sq_tail;
- u16 cq_head;
- u16 cq_phase;
- unsigned long cmdid_data[];
-};
-
-/*
- * Check we didn't inadvertently grow the command struct
- */
-static inline void _nvme_check_size(void)
-{
- BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
- BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
- BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
-}
-
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
- struct nvme_completion *);
-
-struct nvme_cmd_info {
- nvme_completion_fn fn;
- void *ctx;
- unsigned long timeout;
-};
-
-static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
-{
- return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
-}
-
-/**
- * alloc_cmdid() - Allocate a Command ID
- * @nvmeq: The queue that will be used for this command
- * @ctx: A pointer that will be passed to the handler
- * @handler: The function to call on completion
- *
- * Allocate a Command ID for a queue. The data passed in will
- * be passed to the completion handler. This is implemented by using
- * the bottom two bits of the ctx pointer to store the handler ID.
- * Passing in a pointer that's not 4-byte aligned will cause a BUG.
- * We can change this if it becomes a problem.
- *
- * May be called with local interrupts disabled and the q_lock held,
- * or with interrupts enabled and no locks held.
- */
-static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
- nvme_completion_fn handler, unsigned timeout)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- int cmdid;
-
- do {
- cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
- if (cmdid >= depth)
- return -EBUSY;
- } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
-
- info[cmdid].fn = handler;
- info[cmdid].ctx = ctx;
- info[cmdid].timeout = jiffies + timeout;
- return cmdid;
-}
-
-static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
- nvme_completion_fn handler, unsigned timeout)
-{
- int cmdid;
- wait_event_killable(nvmeq->sq_full,
- (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
- return (cmdid < 0) ? -EINTR : cmdid;
-}
-
-/* Special values must be less than 0x1000 */
-#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
-#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
-#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
-#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
-#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
-
-static void special_completion(struct nvme_dev *dev, void *ctx,
- struct nvme_completion *cqe)
-{
- if (ctx == CMD_CTX_CANCELLED)
- return;
- if (ctx == CMD_CTX_FLUSH)
- return;
- if (ctx == CMD_CTX_COMPLETED) {
- dev_warn(&dev->pci_dev->dev,
- "completed id %d twice on queue %d\n",
- cqe->command_id, le16_to_cpup(&cqe->sq_id));
- return;
- }
- if (ctx == CMD_CTX_INVALID) {
- dev_warn(&dev->pci_dev->dev,
- "invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpup(&cqe->sq_id));
- return;
- }
-
- dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
- nvme_completion_fn *fn)
-{
- void *ctx;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-
- if (cmdid >= nvmeq->q_depth) {
- *fn = special_completion;
- return CMD_CTX_INVALID;
- }
- *fn = info[cmdid].fn;
- ctx = info[cmdid].ctx;
- info[cmdid].fn = special_completion;
- info[cmdid].ctx = CMD_CTX_COMPLETED;
- clear_bit(cmdid, nvmeq->cmdid_data);
- wake_up(&nvmeq->sq_full);
- return ctx;
-}
-
-static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
- nvme_completion_fn *fn)
-{
- void *ctx;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- if (fn)
- *fn = info[cmdid].fn;
- ctx = info[cmdid].ctx;
- info[cmdid].fn = special_completion;
- info[cmdid].ctx = CMD_CTX_CANCELLED;
- return ctx;
-}
-
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
-{
- return dev->queues[get_cpu() + 1];
-}
-
-static void put_nvmeq(struct nvme_queue *nvmeq)
-{
- put_cpu();
-}
-
-/**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
- * @nvmeq: The queue to use
- * @cmd: The command to send
- *
- * Safe to use from interrupt context
- */
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
-{
- unsigned long flags;
- u16 tail;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
- tail = nvmeq->sq_tail;
- memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
- if (++tail == nvmeq->q_depth)
- tail = 0;
- writel(tail, nvmeq->q_db);
- nvmeq->sq_tail = tail;
- spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-
- return 0;
-}
-
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
- void *private; /* For the use of the submitter of the I/O */
- int npages; /* In the PRP list. 0 means small pool in use */
- int offset; /* Of PRP list */
- int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
- dma_addr_t first_dma;
- struct scatterlist sg[0];
-};
-
-static __le64 **iod_list(struct nvme_iod *iod)
-{
- return ((void *)iod) + iod->offset;
-}
-
-/*
- * Will slightly overestimate the number of pages needed. This is OK
- * as it only leads to a small amount of wasted memory for the lifetime of
- * the I/O.
- */
-static int nvme_npages(unsigned size)
-{
- unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
- return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
-}
-
-static struct nvme_iod *
-nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
-{
- struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
- sizeof(__le64 *) * nvme_npages(nbytes) +
- sizeof(struct scatterlist) * nseg, gfp);
-
- if (iod) {
- iod->offset = offsetof(struct nvme_iod, sg[nseg]);
- iod->npages = -1;
- iod->length = nbytes;
- }
-
- return iod;
-}
-
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
-{
- const int last_prp = PAGE_SIZE / 8 - 1;
- int i;
- __le64 **list = iod_list(iod);
- dma_addr_t prp_dma = iod->first_dma;
-
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
- for (i = 0; i < iod->npages; i++) {
- __le64 *prp_list = list[i];
- dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
- dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
- prp_dma = next_prp_dma;
- }
- kfree(iod);
-}
-
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
- struct nvme_queue *nvmeq = get_nvmeq(dev);
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- put_nvmeq(nvmeq);
- wake_up_process(nvme_thread);
-}
-
-static void bio_completion(struct nvme_dev *dev, void *ctx,
- struct nvme_completion *cqe)
-{
- struct nvme_iod *iod = ctx;
- struct bio *bio = iod->private;
- u16 status = le16_to_cpup(&cqe->status) >> 1;
-
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
- bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- nvme_free_iod(dev, iod);
- if (status) {
- bio_endio(bio, -EIO);
- } else if (bio->bi_vcnt > bio->bi_idx) {
- requeue_bio(dev, bio);
- } else {
- bio_endio(bio, 0);
- }
-}
-
-/* length is in bytes. gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
- struct nvme_common_command *cmd, struct nvme_iod *iod,
- int total_len, gfp_t gfp)
-{
- struct dma_pool *pool;
- int length = total_len;
- struct scatterlist *sg = iod->sg;
- int dma_len = sg_dma_len(sg);
- u64 dma_addr = sg_dma_address(sg);
- int offset = offset_in_page(dma_addr);
- __le64 *prp_list;
- __le64 **list = iod_list(iod);
- dma_addr_t prp_dma;
- int nprps, i;
-
- cmd->prp1 = cpu_to_le64(dma_addr);
- length -= (PAGE_SIZE - offset);
- if (length <= 0)
- return total_len;
-
- dma_len -= (PAGE_SIZE - offset);
- if (dma_len) {
- dma_addr += (PAGE_SIZE - offset);
- } else {
- sg = sg_next(sg);
- dma_addr = sg_dma_address(sg);
- dma_len = sg_dma_len(sg);
- }
-
- if (length <= PAGE_SIZE) {
- cmd->prp2 = cpu_to_le64(dma_addr);
- return total_len;
- }
-
- nprps = DIV_ROUND_UP(length, PAGE_SIZE);
- if (nprps <= (256 / 8)) {
- pool = dev->prp_small_pool;
- iod->npages = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->npages = 1;
- }
-
- prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
- if (!prp_list) {
- cmd->prp2 = cpu_to_le64(dma_addr);
- iod->npages = -1;
- return (total_len - length) + PAGE_SIZE;
- }
- list[0] = prp_list;
- iod->first_dma = prp_dma;
- cmd->prp2 = cpu_to_le64(prp_dma);
- i = 0;
- for (;;) {
- if (i == PAGE_SIZE / 8) {
- __le64 *old_prp_list = prp_list;
- prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
- if (!prp_list)
- return total_len - length;
- list[iod->npages++] = prp_list;
- prp_list[0] = old_prp_list[i - 1];
- old_prp_list[i - 1] = cpu_to_le64(prp_dma);
- i = 1;
- }
- prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= PAGE_SIZE;
- dma_addr += PAGE_SIZE;
- length -= PAGE_SIZE;
- if (length <= 0)
- break;
- if (dma_len > 0)
- continue;
- BUG_ON(dma_len < 0);
- sg = sg_next(sg);
- dma_addr = sg_dma_address(sg);
- dma_len = sg_dma_len(sg);
- }
-
- return total_len;
-}
-
-/* NVMe scatterlists require no holes in the virtual address */
-#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
- (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
-
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
- struct bio *bio, enum dma_data_direction dma_dir, int psegs)
-{
- struct bio_vec *bvec, *bvprv = NULL;
- struct scatterlist *sg = NULL;
- int i, old_idx, length = 0, nsegs = 0;
-
- sg_init_table(iod->sg, psegs);
- old_idx = bio->bi_idx;
- bio_for_each_segment(bvec, bio, i) {
- if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
- sg->length += bvec->bv_len;
- } else {
- if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
- break;
- sg = sg ? sg + 1 : iod->sg;
- sg_set_page(sg, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset);
- nsegs++;
- }
- length += bvec->bv_len;
- bvprv = bvec;
- }
- bio->bi_idx = i;
- iod->nents = nsegs;
- sg_mark_end(sg);
- if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
- bio->bi_idx = old_idx;
- return -ENOMEM;
- }
- return length;
-}
-
-static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- int cmdid)
-{
- struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.command_id = cmdid;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
-
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
-
- return 0;
-}
-
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
-{
- int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
- special_completion, NVME_IO_TIMEOUT);
- if (unlikely(cmdid < 0))
- return cmdid;
-
- return nvme_submit_flush(nvmeq, ns, cmdid);
-}
-
-/*
- * Called with local interrupts disabled and the q_lock held. May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct bio *bio)
-{
- struct nvme_command *cmnd;
- struct nvme_iod *iod;
- enum dma_data_direction dma_dir;
- int cmdid, length, result = -ENOMEM;
- u16 control;
- u32 dsmgmt;
- int psegs = bio_phys_segments(ns->queue, bio);
-
- if ((bio->bi_rw & REQ_FLUSH) && psegs) {
- result = nvme_submit_flush_data(nvmeq, ns);
- if (result)
- return result;
- }
-
- iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
- if (!iod)
- goto nomem;
- iod->private = bio;
-
- result = -EBUSY;
- cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
- if (unlikely(cmdid < 0))
- goto free_iod;
-
- if ((bio->bi_rw & REQ_FLUSH) && !psegs)
- return nvme_submit_flush(nvmeq, ns, cmdid);
-
- control = 0;
- if (bio->bi_rw & REQ_FUA)
- control |= NVME_RW_FUA;
- if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
- control |= NVME_RW_LR;
-
- dsmgmt = 0;
- if (bio->bi_rw & REQ_RAHEAD)
- dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
- cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
- memset(cmnd, 0, sizeof(*cmnd));
- if (bio_data_dir(bio)) {
- cmnd->rw.opcode = nvme_cmd_write;
- dma_dir = DMA_TO_DEVICE;
- } else {
- cmnd->rw.opcode = nvme_cmd_read;
- dma_dir = DMA_FROM_DEVICE;
- }
-
- result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
- if (result < 0)
- goto free_iod;
- length = result;
-
- cmnd->rw.command_id = cmdid;
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
- GFP_ATOMIC);
- cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
- cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
- cmnd->rw.control = cpu_to_le16(control);
- cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
-
- bio->bi_sector += length >> 9;
-
- if (++nvmeq->sq_tail == nvmeq->q_depth)
- nvmeq->sq_tail = 0;
- writel(nvmeq->sq_tail, nvmeq->q_db);
-
- return 0;
-
- free_iod:
- nvme_free_iod(nvmeq->dev, iod);
- nomem:
- return result;
-}
-
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
-{
- struct nvme_ns *ns = q->queuedata;
- struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
- int result = -EBUSY;
-
- spin_lock_irq(&nvmeq->q_lock);
- if (bio_list_empty(&nvmeq->sq_cong))
- result = nvme_submit_bio_queue(nvmeq, ns, bio);
- if (unlikely(result)) {
- if (bio_list_empty(&nvmeq->sq_cong))
- add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
- bio_list_add(&nvmeq->sq_cong, bio);
- }
-
- spin_unlock_irq(&nvmeq->q_lock);
- put_nvmeq(nvmeq);
-}
-
-static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
-{
- u16 head, phase;
-
- head = nvmeq->cq_head;
- phase = nvmeq->cq_phase;
-
- for (;;) {
- void *ctx;
- nvme_completion_fn fn;
- struct nvme_completion cqe = nvmeq->cqes[head];
- if ((le16_to_cpu(cqe.status) & 1) != phase)
- break;
- nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
- if (++head == nvmeq->q_depth) {
- head = 0;
- phase = !phase;
- }
-
- ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-
- /* If the controller ignores the cq head doorbell and continuously
- * writes to the queue, it is theoretically possible to wrap around
- * the queue twice and mistakenly return IRQ_NONE. Linux only
- * requires that 0.1% of your interrupts are handled, so this isn't
- * a big problem.
- */
- if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
- return IRQ_NONE;
-
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
- nvmeq->cq_head = head;
- nvmeq->cq_phase = phase;
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t nvme_irq(int irq, void *data)
-{
- irqreturn_t result;
- struct nvme_queue *nvmeq = data;
- spin_lock(&nvmeq->q_lock);
- result = nvme_process_cq(nvmeq);
- spin_unlock(&nvmeq->q_lock);
- return result;
-}
-
-static irqreturn_t nvme_irq_check(int irq, void *data)
-{
- struct nvme_queue *nvmeq = data;
- struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
- if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
- return IRQ_NONE;
- return IRQ_WAKE_THREAD;
-}
-
-static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
-{
- spin_lock_irq(&nvmeq->q_lock);
- cancel_cmdid(nvmeq, cmdid, NULL);
- spin_unlock_irq(&nvmeq->q_lock);
-}
-
-struct sync_cmd_info {
- struct task_struct *task;
- u32 result;
- int status;
-};
-
-static void sync_completion(struct nvme_dev *dev, void *ctx,
- struct nvme_completion *cqe)
-{
- struct sync_cmd_info *cmdinfo = ctx;
- cmdinfo->result = le32_to_cpup(&cqe->result);
- cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
- wake_up_process(cmdinfo->task);
-}
-
-/*
- * Returns 0 on success. If the result is negative, it's a Linux error code;
- * if the result is positive, it's an NVM Express status code
- */
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
- struct nvme_command *cmd, u32 *result, unsigned timeout)
-{
- int cmdid;
- struct sync_cmd_info cmdinfo;
-
- cmdinfo.task = current;
- cmdinfo.status = -EINTR;
-
- cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
- timeout);
- if (cmdid < 0)
- return cmdid;
- cmd->common.command_id = cmdid;
-
- set_current_state(TASK_KILLABLE);
- nvme_submit_cmd(nvmeq, cmd);
- schedule();
-
- if (cmdinfo.status == -EINTR) {
- nvme_abort_command(nvmeq, cmdid);
- return -EINTR;
- }
-
- if (result)
- *result = cmdinfo.result;
-
- return cmdinfo.status;
-}
-
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
- u32 *result)
-{
- return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
-}
-
-static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
-{
- int status;
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.delete_queue.opcode = opcode;
- c.delete_queue.qid = cpu_to_le16(id);
-
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
-}
-
-static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
- struct nvme_queue *nvmeq)
-{
- int status;
- struct nvme_command c;
- int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
-
- memset(&c, 0, sizeof(c));
- c.create_cq.opcode = nvme_admin_create_cq;
- c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
- c.create_cq.cqid = cpu_to_le16(qid);
- c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
- c.create_cq.cq_flags = cpu_to_le16(flags);
- c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
-
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
-}
-
-static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
- struct nvme_queue *nvmeq)
-{
- int status;
- struct nvme_command c;
- int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
-
- memset(&c, 0, sizeof(c));
- c.create_sq.opcode = nvme_admin_create_sq;
- c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
- c.create_sq.sqid = cpu_to_le16(qid);
- c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
- c.create_sq.sq_flags = cpu_to_le16(flags);
- c.create_sq.cqid = cpu_to_le16(qid);
-
- status = nvme_submit_admin_cmd(dev, &c, NULL);
- if (status)
- return -EIO;
- return 0;
-}
-
-static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
-{
- return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
-}
-
-static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
-{
- return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
-}
-
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
- dma_addr_t dma_addr)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
- c.identify.prp1 = cpu_to_le64(dma_addr);
- c.identify.cns = cpu_to_le32(cns);
-
- return nvme_submit_admin_cmd(dev, &c, NULL);
-}
-
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_get_features;
- c.features.prp1 = cpu_to_le64(dma_addr);
- c.features.fid = cpu_to_le32(fid);
- c.features.dword11 = cpu_to_le32(dword11);
-
- return nvme_submit_admin_cmd(dev, &c, NULL);
-}
-
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
- unsigned dword11, dma_addr_t dma_addr, u32 *result)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- c.features.opcode = nvme_admin_set_features;
- c.features.prp1 = cpu_to_le64(dma_addr);
- c.features.fid = cpu_to_le32(fid);
- c.features.dword11 = cpu_to_le32(dword11);
-
- return nvme_submit_admin_cmd(dev, &c, result);
-}
-
-static void nvme_free_queue(struct nvme_dev *dev, int qid)
-{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
-
- irq_set_affinity_hint(vector, NULL);
- free_irq(vector, nvmeq);
-
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
-
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
- (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
-}
-
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int vector)
-{
- struct device *dmadev = &dev->pci_dev->dev;
- unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
- struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
- if (!nvmeq)
- return NULL;
-
- nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
- &nvmeq->cq_dma_addr, GFP_KERNEL);
- if (!nvmeq->cqes)
- goto free_nvmeq;
- memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
-
- nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- if (!nvmeq->sq_cmds)
- goto free_cqdma;
-
- nvmeq->q_dmadev = dmadev;
- nvmeq->dev = dev;
- spin_lock_init(&nvmeq->q_lock);
- nvmeq->cq_head = 0;
- nvmeq->cq_phase = 1;
- init_waitqueue_head(&nvmeq->sq_full);
- init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
- bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
- nvmeq->q_depth = depth;
- nvmeq->cq_vector = vector;
-
- return nvmeq;
-
- free_cqdma:
- dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
- nvmeq->cq_dma_addr);
- free_nvmeq:
- kfree(nvmeq);
- return NULL;
-}
-
-static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- const char *name)
-{
- if (use_threaded_interrupts)
- return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
- name, nvmeq);
- return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
-}
-
-static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
- int qid, int cq_size, int vector)
-{
- int result;
- struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
-
- if (!nvmeq)
- return ERR_PTR(-ENOMEM);
-
- result = adapter_alloc_cq(dev, qid, nvmeq);
- if (result < 0)
- goto free_nvmeq;
-
- result = adapter_alloc_sq(dev, qid, nvmeq);
- if (result < 0)
- goto release_cq;
-
- result = queue_request_irq(dev, nvmeq, "nvme");
- if (result < 0)
- goto release_sq;
-
- return nvmeq;
-
- release_sq:
- adapter_delete_sq(dev, qid);
- release_cq:
- adapter_delete_cq(dev, qid);
- free_nvmeq:
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
- (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
- return ERR_PTR(result);
-}
-
-static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
-{
- int result;
- u32 aqa;
- u64 cap;
- unsigned long timeout;
- struct nvme_queue *nvmeq;
-
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
-
- nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
- if (!nvmeq)
- return -ENOMEM;
-
- aqa = nvmeq->q_depth - 1;
- aqa |= aqa << 16;
-
- dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
- dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
- dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
- dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
-
- writel(0, &dev->bar->cc);
- writel(aqa, &dev->bar->aqa);
- writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
- writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
- writel(dev->ctrl_config, &dev->bar->cc);
-
- cap = readq(&dev->bar->cap);
- timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
- dev->db_stride = NVME_CAP_STRIDE(cap);
-
- while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
- msleep(100);
- if (fatal_signal_pending(current))
- return -EINTR;
- if (time_after(jiffies, timeout)) {
- dev_err(&dev->pci_dev->dev,
- "Device not ready; aborting initialisation\n");
- return -ENODEV;
- }
- }
-
- result = queue_request_irq(dev, nvmeq, "nvme admin");
- dev->queues[0] = nvmeq;
- return result;
-}
-
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
- unsigned long addr, unsigned length)
-{
- int i, err, count, nents, offset;
- struct scatterlist *sg;
- struct page **pages;
- struct nvme_iod *iod;
-
- if (addr & 3)
- return ERR_PTR(-EINVAL);
- if (!length)
- return ERR_PTR(-EINVAL);
-
- offset = offset_in_page(addr);
- count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
- pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
-
- err = get_user_pages_fast(addr, count, 1, pages);
- if (err < count) {
- count = err;
- err = -EFAULT;
- goto put_pages;
- }
-
- iod = nvme_alloc_iod(count, length, GFP_KERNEL);
- sg = iod->sg;
- sg_init_table(sg, count);
- for (i = 0; i < count; i++) {
- sg_set_page(&sg[i], pages[i],
- min_t(int, length, PAGE_SIZE - offset), offset);
- length -= (PAGE_SIZE - offset);
- offset = 0;
- }
- sg_mark_end(&sg[i - 1]);
- iod->nents = count;
-
- err = -ENOMEM;
- nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (!nents)
- goto free_iod;
-
- kfree(pages);
- return iod;
-
- free_iod:
- kfree(iod);
- put_pages:
- for (i = 0; i < count; i++)
- put_page(pages[i]);
- kfree(pages);
- return ERR_PTR(err);
-}
-
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
- struct nvme_iod *iod)
-{
- int i;
-
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
- write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- for (i = 0; i < iod->nents; i++)
- put_page(sg_page(&iod->sg[i]));
-}
-
-static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
-{
- struct nvme_dev *dev = ns->dev;
- struct nvme_queue *nvmeq;
- struct nvme_user_io io;
- struct nvme_command c;
- unsigned length;
- int status;
- struct nvme_iod *iod;
-
- if (copy_from_user(&io, uio, sizeof(io)))
- return -EFAULT;
- length = (io.nblocks + 1) << ns->lba_shift;
-
- switch (io.opcode) {
- case nvme_cmd_write:
- case nvme_cmd_read:
- case nvme_cmd_compare:
- iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
- break;
- default:
- return -EINVAL;
- }
-
- if (IS_ERR(iod))
- return PTR_ERR(iod);
-
- memset(&c, 0, sizeof(c));
- c.rw.opcode = io.opcode;
- c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(ns->ns_id);
- c.rw.slba = cpu_to_le64(io.slba);
- c.rw.length = cpu_to_le16(io.nblocks);
- c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
- c.rw.reftag = io.reftag;
- c.rw.apptag = io.apptag;
- c.rw.appmask = io.appmask;
- /* XXX: metadata */
- length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
-
- nvmeq = get_nvmeq(dev);
- /*
- * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
- * disabled. We may be preempted at any point, and be rescheduled
- * to a different CPU. That will cause cacheline bouncing, but no
- * additional races since q_lock already protects against other CPUs.
- */
- put_nvmeq(nvmeq);
- if (length != (io.nblocks + 1) << ns->lba_shift)
- status = -ENOMEM;
- else
- status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
- nvme_unmap_user_pages(dev, io.opcode & 1, iod);
- nvme_free_iod(dev, iod);
- return status;
-}
-
-static int nvme_user_admin_cmd(struct nvme_ns *ns,
- struct nvme_admin_cmd __user *ucmd)
-{
- struct nvme_dev *dev = ns->dev;
- struct nvme_admin_cmd cmd;
- struct nvme_command c;
- int status, length;
- struct nvme_iod *iod;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
- return -EFAULT;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
- c.common.nsid = cpu_to_le32(cmd.nsid);
- c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
- c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
- c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
- c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
- c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
- c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
-
- length = cmd.data_len;
- if (cmd.data_len) {
- iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
- length);
- if (IS_ERR(iod))
- return PTR_ERR(iod);
- length = nvme_setup_prps(dev, &c.common, iod, length,
- GFP_KERNEL);
- }
-
- if (length != cmd.data_len)
- status = -ENOMEM;
- else
- status = nvme_submit_admin_cmd(dev, &c, NULL);
-
- if (cmd.data_len) {
- nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
- nvme_free_iod(dev, iod);
- }
- return status;
-}
-
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case NVME_IOCTL_ID:
- return ns->ns_id;
- case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_admin_cmd(ns, (void __user *)arg);
- case NVME_IOCTL_SUBMIT_IO:
- return nvme_submit_io(ns, (void __user *)arg);
- default:
- return -ENOTTY;
- }
-}
-
-static const struct block_device_operations nvme_fops = {
- .owner = THIS_MODULE,
- .ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
-};
-
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
-
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
- if (!time_after(now, info[cmdid].timeout))
- continue;
- dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-}
-
-static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
-{
- while (bio_list_peek(&nvmeq->sq_cong)) {
- struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
- struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
- if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
- bio_list_add_head(&nvmeq->sq_cong, bio);
- break;
- }
- if (bio_list_empty(&nvmeq->sq_cong))
- remove_wait_queue(&nvmeq->sq_full,
- &nvmeq->sq_cong_wait);
- }
-}
-
-static int nvme_kthread(void *data)
-{
- struct nvme_dev *dev;
-
- while (!kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
- spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
- int i;
- for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq = dev->queues[i];
- if (!nvmeq)
- continue;
- spin_lock_irq(&nvmeq->q_lock);
- if (nvme_process_cq(nvmeq))
- printk("process_cq did something\n");
- nvme_timeout_ios(nvmeq);
- nvme_resubmit_bios(nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
- }
- }
- spin_unlock(&dev_list_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
- return 0;
-}
-
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
- struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
-{
- struct nvme_ns *ns;
- struct gendisk *disk;
- int lbaf;
-
- if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
- return NULL;
-
- ns = kzalloc(sizeof(*ns), GFP_KERNEL);
- if (!ns)
- return NULL;
- ns->queue = blk_alloc_queue(GFP_KERNEL);
- if (!ns->queue)
- goto out_free_ns;
- ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
- queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
- blk_queue_make_request(ns->queue, nvme_make_request);
- ns->dev = dev;
- ns->queue->queuedata = ns;
-
- disk = alloc_disk(NVME_MINORS);
- if (!disk)
- goto out_free_queue;
- ns->ns_id = nsid;
- ns->disk = disk;
- lbaf = id->flbas & 0xf;
- ns->lba_shift = id->lbaf[lbaf].ds;
-
- disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->driverfs_dev = &dev->pci_dev->dev;
- sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
- set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
- return ns;
-
- out_free_queue:
- blk_cleanup_queue(ns->queue);
- out_free_ns:
- kfree(ns);
- return NULL;
-}
-
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
-static int set_queue_count(struct nvme_dev *dev, int count)
-{
- int status;
- u32 result;
- u32 q_count = (count - 1) | ((count - 1) << 16);
-
- status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
- &result);
- if (status)
- return -EIO;
- return min(result & 0xffff, result >> 16) + 1;
-}
-
-static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
-{
- int result, cpu, i, nr_io_queues, db_bar_size;
-
- nr_io_queues = num_online_cpus();
- result = set_queue_count(dev, nr_io_queues);
- if (result < 0)
- return result;
- if (result < nr_io_queues)
- nr_io_queues = result;
-
- /* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
-
- db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
- if (db_bar_size > 8192) {
- iounmap(dev->bar);
- dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
- db_bar_size);
- dev->dbs = ((void __iomem *)dev->bar) + 4096;
- dev->queues[0]->q_db = dev->dbs;
- }
-
- for (i = 0; i < nr_io_queues; i++)
- dev->entry[i].entry = i;
- for (;;) {
- result = pci_enable_msix(dev->pci_dev, dev->entry,
- nr_io_queues);
- if (result == 0) {
- break;
- } else if (result > 0) {
- nr_io_queues = result;
- continue;
- } else {
- nr_io_queues = 1;
- break;
- }
- }
-
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
- /* XXX: handle failure here */
-
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_io_queues; i++) {
- irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-
- for (i = 0; i < nr_io_queues; i++) {
- dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
- NVME_Q_DEPTH, i);
- if (IS_ERR(dev->queues[i + 1]))
- return PTR_ERR(dev->queues[i + 1]);
- dev->queue_count++;
- }
-
- for (; i < num_possible_cpus(); i++) {
- int target = i % rounddown_pow_of_two(dev->queue_count - 1);
- dev->queues[i + 1] = dev->queues[target + 1];
- }
-
- return 0;
-}
-
-static void nvme_free_queues(struct nvme_dev *dev)
-{
- int i;
-
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_free_queue(dev, i);
-}
-
-static int __devinit nvme_dev_add(struct nvme_dev *dev)
-{
- int res, nn, i;
- struct nvme_ns *ns, *next;
- struct nvme_id_ctrl *ctrl;
- struct nvme_id_ns *id_ns;
- void *mem;
- dma_addr_t dma_addr;
-
- res = nvme_setup_io_queues(dev);
- if (res)
- return res;
-
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
-
- res = nvme_identify(dev, 0, 1, dma_addr);
- if (res) {
- res = -EIO;
- goto out_free;
- }
-
- ctrl = mem;
- nn = le32_to_cpup(&ctrl->nn);
- memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
- memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
- memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-
- id_ns = mem;
- for (i = 1; i <= nn; i++) {
- res = nvme_identify(dev, i, 0, dma_addr);
- if (res)
- continue;
-
- if (id_ns->ncap == 0)
- continue;
-
- res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
- dma_addr + 4096);
- if (res)
- continue;
-
- ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
- if (ns)
- list_add_tail(&ns->list, &dev->namespaces);
- }
- list_for_each_entry(ns, &dev->namespaces, list)
- add_disk(ns->disk);
-
- goto out;
-
- out_free:
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- nvme_ns_free(ns);
- }
-
- out:
- dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
- return res;
-}
-
-static int nvme_dev_remove(struct nvme_dev *dev)
-{
- struct nvme_ns *ns, *next;
-
- spin_lock(&dev_list_lock);
- list_del(&dev->node);
- spin_unlock(&dev_list_lock);
-
- /* TODO: wait until all outstanding I/O has finished, or cancel it */
-
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
- }
-
- nvme_free_queues(dev);
-
- return 0;
-}
-
-static int nvme_setup_prp_pools(struct nvme_dev *dev)
-{
- struct device *dmadev = &dev->pci_dev->dev;
- dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
- PAGE_SIZE, PAGE_SIZE, 0);
- if (!dev->prp_page_pool)
- return -ENOMEM;
-
- /* Optimisation for I/Os between 4k and 128k */
- dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
- 256, 256, 0);
- if (!dev->prp_small_pool) {
- dma_pool_destroy(dev->prp_page_pool);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void nvme_release_prp_pools(struct nvme_dev *dev)
-{
- dma_pool_destroy(dev->prp_page_pool);
- dma_pool_destroy(dev->prp_small_pool);
-}
-
-/* XXX: Use an ida or something to let remove / add work correctly */
-static void nvme_set_instance(struct nvme_dev *dev)
-{
- static int instance;
- dev->instance = instance++;
-}
-
-static void nvme_release_instance(struct nvme_dev *dev)
-{
-}
-
-static int __devinit nvme_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int bars, result = -ENOMEM;
- struct nvme_dev *dev;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
- GFP_KERNEL);
- if (!dev->entry)
- goto free;
- dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
- GFP_KERNEL);
- if (!dev->queues)
- goto free;
-
- if (pci_enable_device_mem(pdev))
- goto free;
- pci_set_master(pdev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_request_selected_regions(pdev, bars, "nvme"))
- goto disable;
-
- INIT_LIST_HEAD(&dev->namespaces);
- dev->pci_dev = pdev;
- pci_set_drvdata(pdev, dev);
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- nvme_set_instance(dev);
- dev->entry[0].vector = pdev->irq;
-
- result = nvme_setup_prp_pools(dev);
- if (result)
- goto disable_msix;
-
- dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
- if (!dev->bar) {
- result = -ENOMEM;
- goto disable_msix;
- }
-
- result = nvme_configure_admin_queue(dev);
- if (result)
- goto unmap;
- dev->queue_count++;
-
- spin_lock(&dev_list_lock);
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
- result = nvme_dev_add(dev);
- if (result)
- goto delete;
-
- return 0;
-
- delete:
- spin_lock(&dev_list_lock);
- list_del(&dev->node);
- spin_unlock(&dev_list_lock);
-
- nvme_free_queues(dev);
- unmap:
- iounmap(dev->bar);
- disable_msix:
- pci_disable_msix(pdev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
- disable:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- free:
- kfree(dev->queues);
- kfree(dev->entry);
- kfree(dev);
- return result;
-}
-
-static void __devexit nvme_remove(struct pci_dev *pdev)
-{
- struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_dev_remove(dev);
- pci_disable_msix(pdev);
- iounmap(dev->bar);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
- pci_disable_device(pdev);
- pci_release_regions(pdev);
- kfree(dev->queues);
- kfree(dev->entry);
- kfree(dev);
-}
-
-/* These functions are yet to be implemented */
-#define nvme_error_detected NULL
-#define nvme_dump_registers NULL
-#define nvme_link_reset NULL
-#define nvme_slot_reset NULL
-#define nvme_error_resume NULL
-#define nvme_suspend NULL
-#define nvme_resume NULL
-
-static struct pci_error_handlers nvme_err_handler = {
- .error_detected = nvme_error_detected,
- .mmio_enabled = nvme_dump_registers,
- .link_reset = nvme_link_reset,
- .slot_reset = nvme_slot_reset,
- .resume = nvme_error_resume,
-};
-
-/* Move to pci_ids.h later */
-#define PCI_CLASS_STORAGE_EXPRESS 0x010802
-
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
- { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, nvme_id_table);
-
-static struct pci_driver nvme_driver = {
- .name = "nvme",
- .id_table = nvme_id_table,
- .probe = nvme_probe,
- .remove = __devexit_p(nvme_remove),
- .suspend = nvme_suspend,
- .resume = nvme_resume,
- .err_handler = &nvme_err_handler,
-};
-
-static int __init nvme_init(void)
-{
- int result = -EBUSY;
-
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- if (IS_ERR(nvme_thread))
- return PTR_ERR(nvme_thread);
-
- nvme_major = register_blkdev(nvme_major, "nvme");
- if (nvme_major <= 0)
- goto kill_kthread;
-
- result = pci_register_driver(&nvme_driver);
- if (result)
- goto unregister_blkdev;
- return 0;
-
- unregister_blkdev:
- unregister_blkdev(nvme_major, "nvme");
- kill_kthread:
- kthread_stop(nvme_thread);
- return result;
-}
-
-static void __exit nvme_exit(void)
-{
- pci_unregister_driver(&nvme_driver);
- unregister_blkdev(nvme_major, "nvme");
- kthread_stop(nvme_thread);
-}
-
-MODULE_AUTHOR("Matthew Wilcox ");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
-module_init(nvme_init);
-module_exit(nvme_exit);
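The nvme_probe() removed above is a textbook example of kernel-style error unwinding: resources are acquired in a fixed order and, on failure, control jumps to a label that releases everything acquired so far, in reverse order. A minimal userspace sketch of the same pattern follows; the struct, sizes and labels are invented for illustration, not NVMe specifics.

#include <stdio.h>
#include <stdlib.h>

struct dev_ctx {
	int *entry;
	int *queues;
};

static int probe(struct dev_ctx **out)
{
	int result = -1;
	struct dev_ctx *dev;

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return -1;

	dev->entry = calloc(16, sizeof(*dev->entry));
	if (!dev->entry)
		goto free_dev;

	dev->queues = calloc(17, sizeof(*dev->queues));
	if (!dev->queues)
		goto free_entry;

	/* further setup steps would add matching labels below */
	*out = dev;
	return 0;

 free_entry:
	free(dev->entry);
 free_dev:
	free(dev);
	return result;
}

int main(void)
{
	struct dev_ctx *dev;

	if (probe(&dev))
		return EXIT_FAILURE;
	printf("probe succeeded\n");
	free(dev->queues);
	free(dev->entry);
	free(dev);
	return 0;
}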
diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c
index 32362cf35b8d..6a8771f47a55 100644
--- a/trunk/drivers/char/tpm/tpm.c
+++ b/trunk/drivers/char/tpm/tpm.c
@@ -846,15 +846,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
do {
rc = __tpm_pcr_read(chip, 0, digest);
- if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
- dev_info(chip->dev,
- "TPM is disabled/deactivated (0x%X)\n", rc);
- /* TPM is disabled and/or deactivated; driver can
- * proceed and TPM does handle commands for
- * suspend/resume correctly
- */
- return 0;
- }
if (rc != TPM_WARN_DOING_SELFTEST)
return rc;
msleep(delay_msec);
diff --git a/trunk/drivers/char/tpm/tpm.h b/trunk/drivers/char/tpm/tpm.h
index 010547138281..8c1df302fbb6 100644
--- a/trunk/drivers/char/tpm/tpm.h
+++ b/trunk/drivers/char/tpm/tpm.h
@@ -39,9 +39,6 @@ enum tpm_addr {
};
#define TPM_WARN_DOING_SELFTEST 0x802
-#define TPM_ERR_DEACTIVATED 0x6
-#define TPM_ERR_DISABLED 0x7
-
#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
char *);
diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig
index f1a274994bb1..5a99bb3f255a 100644
--- a/trunk/drivers/dma/Kconfig
+++ b/trunk/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MXC
+ depends on SOC_IMX31 || SOC_IMX35
select DMA_ENGINE
default y
help
@@ -187,13 +187,6 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
-config SIRF_DMA
- tristate "CSR SiRFprimaII DMA support"
- depends on ARCH_PRIMA2
- select DMA_ENGINE
- help
- Enable support for the CSR SiRFprimaII DMA engine.
-
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -208,26 +201,26 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
+ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for LAPIS Semiconductor IOH(Input/
- Output Hub), ML7213, ML7223 and ML7831.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
- for MP(Media Phone) use and ML7831 IOH is for general purpose use.
- ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+ Output Hub), ML7213 and ML7223.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+ for MP(Media Phone) use.
+ ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223 is completely compatible for Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MXC
+ depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51/53 chips.
+ Freescale i.MX25/31/35/51 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/trunk/drivers/dma/Makefile b/trunk/drivers/dma/Makefile
index 009a222e8283..30cf3b1f0c5c 100644
--- a/trunk/drivers/dma/Makefile
+++ b/trunk/drivers/dma/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c
index 8a281584458b..0698695e8bf9 100644
--- a/trunk/drivers/dma/amba-pl08x.c
+++ b/trunk/drivers/dma/amba-pl08x.c
@@ -854,10 +854,8 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
int ret;
/* Check if we already have a channel */
- if (plchan->phychan) {
- ch = plchan->phychan;
- goto got_channel;
- }
+ if (plchan->phychan)
+ return 0;
ch = pl08x_get_phy_channel(pl08x, plchan);
if (!ch) {
@@ -882,22 +880,21 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
+
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_TO_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_FROM_DEVICE)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
- plchan->phychan = ch;
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
ch->id,
ch->signal,
plchan->name);
-got_channel:
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_MEM_TO_DEV)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_DEV_TO_MEM)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
plchan->phychan_hold++;
+ plchan->phychan = ch;
return 0;
}
@@ -1105,10 +1102,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
- if (config->direction == DMA_MEM_TO_DEV) {
+ if (config->direction == DMA_TO_DEVICE) {
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
+ } else if (config->direction == DMA_FROM_DEVICE) {
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1139,7 +1136,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
+ if (plchan->runtime_direction == DMA_FROM_DEVICE) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
pl08x_select_bus(plchan->cd->periph_buses,
@@ -1155,7 +1152,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
- (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
addr_width,
maxburst,
cctl);
@@ -1325,7 +1322,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1357,10 +1354,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
txd->direction = direction;
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
txd->cctl = plchan->dst_cctl;
slave_addr = plchan->dst_addr;
- } else if (direction == DMA_DEV_TO_MEM) {
+ } else if (direction == DMA_FROM_DEVICE) {
txd->cctl = plchan->src_cctl;
slave_addr = plchan->src_addr;
} else {
@@ -1371,10 +1368,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
if (plchan->cd->device_fc)
- tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
- tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1390,7 +1387,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
list_add_tail(&dsg->node, &txd->dsg_list);
dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
dsg->src_addr = sg_phys(sg);
dsg->dst_addr = slave_addr;
} else {
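Most hunks in this file, and in the drivers that follow, only rename the direction enum used by the slave-DMA paths (DMA_TO_DEVICE/DMA_FROM_DEVICE versus DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). The underlying logic is always the same dispatch on direction; a stand-alone sketch of that dispatch, with invented types rather than the dmaengine ones:

#include <stdint.h>
#include <stdio.h>

enum xfer_dir { XFER_TO_DEVICE, XFER_FROM_DEVICE };

struct xfer {
	uint32_t src;
	uint32_t dst;
};

/* Pick source/destination from the memory buffer and the device FIFO. */
static int setup_xfer(struct xfer *x, uint32_t mem_addr, uint32_t dev_addr,
		      enum xfer_dir dir)
{
	switch (dir) {
	case XFER_TO_DEVICE:		/* memory -> peripheral */
		x->src = mem_addr;
		x->dst = dev_addr;
		return 0;
	case XFER_FROM_DEVICE:		/* peripheral -> memory */
		x->src = dev_addr;
		x->dst = mem_addr;
		return 0;
	default:			/* memcpy-style transfers handled elsewhere */
		return -1;
	}
}

int main(void)
{
	struct xfer x;

	setup_xfer(&x, 0x80001000u, 0x40002000u, XFER_TO_DEVICE);
	printf("src=0x%x dst=0x%x\n", x.src, x.dst);
	return 0;
}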
diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c
index 97f87b29b9f3..fcfa0a8b5c59 100644
--- a/trunk/drivers/dma/at_hdmac.c
+++ b/trunk/drivers/dma/at_hdmac.c
@@ -23,8 +23,6 @@
#include
#include
#include
-#include
-#include
#include "at_hdmac_regs.h"
@@ -662,7 +660,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -680,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
sg_len,
- direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
flags);
if (unlikely(!atslave || !sg_len)) {
@@ -694,7 +692,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb = ATC_IEN;
switch (direction) {
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -727,7 +725,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
total_len += len;
}
break;
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
@@ -789,7 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -797,7 +795,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
goto err_out;
return 0;
@@ -812,7 +810,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
u32 ctrla;
unsigned int reg_width = atslave->reg_width;
@@ -824,7 +822,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| period_len >> reg_width;
switch (direction) {
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
desc->lli.saddr = buf_addr + (period_len * period_index);
desc->lli.daddr = atslave->tx_reg;
desc->lli.ctrla = ctrla;
@@ -835,7 +833,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| ATC_DIF(AT_DMA_PER_IF);
break;
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
desc->lli.saddr = atslave->rx_reg;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
@@ -863,7 +861,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
@@ -874,7 +872,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int i;
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
buf_addr,
periods, buf_len, period_len);
@@ -1177,56 +1175,6 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*-- Module Management -----------------------------------------------*/
-/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
-static struct at_dma_platform_data at91sam9rl_config = {
- .nr_channels = 2,
-};
-static struct at_dma_platform_data at91sam9g45_config = {
- .nr_channels = 8,
-};
-
-#if defined(CONFIG_OF)
-static const struct of_device_id atmel_dma_dt_ids[] = {
- {
- .compatible = "atmel,at91sam9rl-dma",
- .data = &at91sam9rl_config,
- }, {
- .compatible = "atmel,at91sam9g45-dma",
- .data = &at91sam9g45_config,
- }, {
- /* sentinel */
- }
-};
-
-MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
-#endif
-
-static const struct platform_device_id atdma_devtypes[] = {
- {
- .name = "at91sam9rl_dma",
- .driver_data = (unsigned long) &at91sam9rl_config,
- }, {
- .name = "at91sam9g45_dma",
- .driver_data = (unsigned long) &at91sam9g45_config,
- }, {
- /* sentinel */
- }
-};
-
-static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
- struct platform_device *pdev)
-{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
- if (match == NULL)
- return NULL;
- return match->data;
- }
- return (struct at_dma_platform_data *)
- platform_get_device_id(pdev)->driver_data;
-}
-
/**
* at_dma_off - disable DMA controller
* @atdma: the Atmel HDAMC device
@@ -1245,23 +1193,18 @@ static void at_dma_off(struct at_dma *atdma)
static int __init at_dma_probe(struct platform_device *pdev)
{
+ struct at_dma_platform_data *pdata;
struct resource *io;
struct at_dma *atdma;
size_t size;
int irq;
int err;
int i;
- struct at_dma_platform_data *plat_dat;
-
- /* setup platform data for each SoC */
- dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
- dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
- dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
- /* get DMA parameters from controller type */
- plat_dat = at_dma_get_driver_data(pdev);
- if (!plat_dat)
- return -ENODEV;
+ /* get DMA Controller parameters from platform */
+ pdata = pdev->dev.platform_data;
+ if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
@@ -1272,14 +1215,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
return irq;
size = sizeof(struct at_dma);
- size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
+ size += pdata->nr_channels * sizeof(struct at_dma_chan);
atdma = kzalloc(size, GFP_KERNEL);
if (!atdma)
return -ENOMEM;
- /* discover transaction capabilities */
- atdma->dma_common.cap_mask = plat_dat->cap_mask;
- atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
+ /* discover transaction capabilities from the platform data */
+ atdma->dma_common.cap_mask = pdata->cap_mask;
+ atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1325,7 +1268,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < plat_dat->nr_channels; i++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
@@ -1370,7 +1313,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- plat_dat->nr_channels);
+ pdata->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1552,11 +1495,9 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
- .id_table = atdma_devtypes,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
- .of_match_table = of_match_ptr(atmel_dma_dt_ids),
},
};
diff --git a/trunk/drivers/dma/at_hdmac_regs.h b/trunk/drivers/dma/at_hdmac_regs.h
index dcaedfc181cf..aa4c9aebab7c 100644
--- a/trunk/drivers/dma/at_hdmac_regs.h
+++ b/trunk/drivers/dma/at_hdmac_regs.h
@@ -251,7 +251,6 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @chan_common: common dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/trunk/drivers/dma/coh901318.c b/trunk/drivers/dma/coh901318.c
index d65a718c0f9b..4234f416ef11 100644
--- a/trunk/drivers/dma/coh901318.c
+++ b/trunk/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *lli;
- enum dma_transfer_direction dir;
+ enum dma_data_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_last |= cohc->runtime_ctrl;
ctrl |= cohc->runtime_ctrl;
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
- } else if (direction == DMA_DEV_TO_MEM) {
+ } else if (direction == DMA_FROM_DEVICE) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_DEV_TO_MEM) {
+ if (config->direction == DMA_FROM_DEVICE) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_MEM_TO_DEV) {
+ } else if (config->direction == DMA_TO_DEVICE) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
diff --git a/trunk/drivers/dma/coh901318_lli.c b/trunk/drivers/dma/coh901318_lli.c
index 6c0e2d4c6682..9f7e0e6a7eea 100644
--- a/trunk/drivers/dma/coh901318_lli.c
+++ b/trunk/drivers/dma/coh901318_lli.c
@@ -7,10 +7,11 @@
* Author: Per Friden
*/
+#include
#include
+#include
#include
#include
-#include
#include
#include "coh901318_lli.h"
@@ -176,18 +177,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_transfer_direction dir)
+ enum dma_data_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
- if (dir == DMA_MEM_TO_DEV) {
+ if (dir == DMA_TO_DEVICE) {
src = buf;
dst = dev_addr;
- } else if (dir == DMA_DEV_TO_MEM) {
+ } else if (dir == DMA_FROM_DEVICE) {
src = dev_addr;
dst = buf;
@@ -214,9 +215,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli = coh901318_lli_next(lli);
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_TO_DEVICE)
src += block_size;
- else if (dir == DMA_DEV_TO_MEM)
+ else if (dir == DMA_FROM_DEVICE)
dst += block_size;
}
@@ -233,7 +234,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask)
+ enum dma_data_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
@@ -248,9 +249,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
spin_lock(&pool->lock);
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_TO_DEVICE)
dst = dev_addr;
- else if (dir == DMA_DEV_TO_MEM)
+ else if (dir == DMA_FROM_DEVICE)
src = dev_addr;
else
goto err;
@@ -268,7 +269,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if (dir == DMA_MEM_TO_DEV)
+ if (dir == DMA_TO_DEVICE)
/* increment source address */
src = sg_phys(sg);
else
@@ -292,7 +293,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- if (dir == DMA_DEV_TO_MEM)
+ if (dir == DMA_FROM_DEVICE)
dst += elem_size;
else
src += elem_size;
diff --git a/trunk/drivers/dma/coh901318_lli.h b/trunk/drivers/dma/coh901318_lli.h
index abff3714fdda..7a5c80990e9e 100644
--- a/trunk/drivers/dma/coh901318_lli.h
+++ b/trunk/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_transfer_direction dir);
+ enum dma_data_direction dir);
/**
* coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
- enum dma_transfer_direction dir, u32 ctrl_irq_mask);
+ enum dma_data_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
diff --git a/trunk/drivers/dma/dmaengine.c b/trunk/drivers/dma/dmaengine.c
index a6c6051ec858..b48967b499da 100644
--- a/trunk/drivers/dma/dmaengine.c
+++ b/trunk/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
+ BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+ !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
- BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
- !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
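The registration hunk above enforces a simple invariant: every capability bit a DMA device advertises must come with the callback that implements it. A compact userspace sketch of that "capability implies callback" check; the capability names and the ops structure are made up for illustration:

#include <stddef.h>
#include <stdio.h>

enum cap { CAP_MEMCPY = 1 << 0, CAP_SLAVE = 1 << 1 };

struct dma_ops {
	unsigned int caps;
	int (*prep_memcpy)(void);
	int (*prep_slave_sg)(void);
};

/* Every advertised capability must have a matching callback. */
static int check_ops(const struct dma_ops *ops)
{
	if ((ops->caps & CAP_MEMCPY) && !ops->prep_memcpy)
		return -1;
	if ((ops->caps & CAP_SLAVE) && !ops->prep_slave_sg)
		return -1;
	return 0;
}

static int memcpy_cb(void) { return 0; }

int main(void)
{
	struct dma_ops ok = { .caps = CAP_MEMCPY, .prep_memcpy = memcpy_cb };
	struct dma_ops bad = { .caps = CAP_SLAVE };	/* callback missing */

	printf("ok:  %d\n", check_ops(&ok));
	printf("bad: %d\n", check_ops(&bad));
	return 0;
}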
diff --git a/trunk/drivers/dma/dw_dmac.c b/trunk/drivers/dma/dw_dmac.c
index 9b592b02b5f4..9bfd6d360718 100644
--- a/trunk/drivers/dma/dw_dmac.c
+++ b/trunk/drivers/dma/dw_dmac.c
@@ -166,38 +166,6 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
return cookie;
}
-static void dwc_initialize(struct dw_dma_chan *dwc)
-{
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
- u32 cfghi = DWC_CFGH_FIFO_MODE;
- u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
-
- if (dwc->initialized == true)
- return;
-
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi = dws->cfg_hi;
- cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
- }
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-
- /* Enable interrupts */
- channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
- dwc->initialized = true;
-}
-
/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
@@ -221,8 +189,6 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
- dwc_initialize(dwc);
-
channel_writel(dwc, LLP, first->txd.phys);
channel_writel(dwc, CTL_LO,
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -730,7 +696,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -754,7 +720,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev = first = NULL;
switch (direction) {
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -811,7 +777,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto slave_sg_todev_fill_desc;
}
break;
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -993,7 +959,10 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
+ struct dw_dma_slave *dws;
int i;
+ u32 cfghi;
+ u32 cfglo;
unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -1006,6 +975,26 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dwc->completed = chan->cookie = 1;
+ cfghi = DWC_CFGH_FIFO_MODE;
+ cfglo = 0;
+
+ dws = chan->private;
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi = dws->cfg_hi;
+ cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ }
+
+ cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
/*
* NOTE: some controllers may have additional features that we
* need to initialize here, like "scatter-gather" (which
@@ -1037,6 +1026,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
i = ++dwc->descs_allocated;
}
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
@@ -1064,7 +1058,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
- dwc->initialized = false;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1172,7 +1165,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
*/
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
+ enum dma_data_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_cyclic_desc *cdesc;
@@ -1213,7 +1206,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1235,7 +1228,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err_desc_get;
switch (direction) {
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1246,7 +1239,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
| DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1342,8 +1335,6 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
static void dw_dma_off(struct dw_dma *dw)
{
- int i;
-
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1354,9 +1345,6 @@ static void dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
-
- for (i = 0; i < dw->dma.chancnt; i++)
- dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
@@ -1545,7 +1533,6 @@ static int dw_suspend_noirq(struct device *dev)
dw_dma_off(platform_get_drvdata(pdev));
clk_disable(dw->clk);
-
return 0;
}
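The dw_dmac hunks revert a flag-guarded, on-demand channel initialisation (dwc_initialize() plus dwc->initialized) back to programming CFG and unmasking interrupts in alloc_chan_resources(). A stripped-down sketch of the flag-guarded variant being removed, with a printf standing in for channel_writel() and all names invented:

#include <stdbool.h>
#include <stdio.h>

struct chan {
	bool initialized;
	unsigned int cfg_lo, cfg_hi;
};

/* Stand-in for the real register write: just trace it. */
static void chan_write(const char *reg, unsigned int val)
{
	printf("write %s = 0x%x\n", reg, val);
}

static void chan_initialize(struct chan *c)
{
	if (c->initialized)	/* do the hardware setup only once */
		return;

	chan_write("CFG_LO", c->cfg_lo);
	chan_write("CFG_HI", c->cfg_hi);
	c->initialized = true;
}

int main(void)
{
	struct chan c = { .cfg_lo = 0x1, .cfg_hi = 0x2 };

	chan_initialize(&c);	/* programs the registers */
	chan_initialize(&c);	/* second call is a no-op  */
	return 0;
}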
diff --git a/trunk/drivers/dma/dw_dmac_regs.h b/trunk/drivers/dma/dw_dmac_regs.h
index 5eef6946a367..c3419518d701 100644
--- a/trunk/drivers/dma/dw_dmac_regs.h
+++ b/trunk/drivers/dma/dw_dmac_regs.h
@@ -140,7 +140,6 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
bool paused;
- bool initialized;
spinlock_t lock;
diff --git a/trunk/drivers/dma/ep93xx_dma.c b/trunk/drivers/dma/ep93xx_dma.c
index 59e7a965772b..b47e2b803faf 100644
--- a/trunk/drivers/dma/ep93xx_dma.c
+++ b/trunk/drivers/dma/ep93xx_dma.c
@@ -246,9 +246,6 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
- if (list_empty(&edmac->active))
- return NULL;
-
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
@@ -266,22 +263,16 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
*/
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc;
-
list_rotate_left(&edmac->active);
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
return true;
- desc = ep93xx_dma_get_active(edmac);
- if (!desc)
- return false;
-
/*
* If txd.cookie is set it means that we are back in the first
* descriptor in the chain and hence done with it.
*/
- return !desc->txd.cookie;
+ return !ep93xx_dma_get_active(edmac)->txd.cookie;
}
/*
@@ -336,16 +327,10 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc;
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
u32 bus_addr;
- desc = ep93xx_dma_get_active(edmac);
- if (!desc) {
- dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
- return;
- }
-
- if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
bus_addr = desc->src_addr;
else
bus_addr = desc->dst_addr;
@@ -458,7 +443,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_MEM_TO_DEV) {
+ if (data->direction == DMA_TO_DEVICE) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -474,7 +459,11 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
* from the EP93xx Users's Guide and might not be correct.
*/
- if (data->direction == DMA_MEM_TO_DEV) {
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
+
+ if (data->direction == DMA_TO_DEVICE) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -484,10 +473,6 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_SAH;
control |= M2M_CONTROL_TM_RX;
}
-
- control |= M2M_CONTROL_NO_HDSK;
- control |= M2M_CONTROL_RSS_IDE;
- control |= M2M_CONTROL_PW_16;
break;
default:
@@ -506,13 +491,7 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc;
-
- desc = ep93xx_dma_get_active(edmac);
- if (!desc) {
- dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
- return;
- }
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
if (edmac->buffer == 0) {
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -690,30 +669,24 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback = NULL;
- void *callback_param = NULL;
+ dma_async_tx_callback callback;
+ void *callback_param;
LIST_HEAD(list);
spin_lock_irq(&edmac->lock);
- /*
- * If dma_terminate_all() was called before we get to run, the active
- * list has become empty. If that happens we aren't supposed to do
- * anything more than call ep93xx_dma_advance_work().
- */
desc = ep93xx_dma_get_active(edmac);
- if (desc) {
- if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
- list_splice_init(&edmac->active, &list);
- }
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
}
spin_unlock_irq(&edmac->lock);
/* Pick up the next descriptor from the queue */
ep93xx_dma_advance_work(edmac);
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
@@ -733,22 +706,13 @@ static void ep93xx_dma_tasklet(unsigned long data)
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
struct ep93xx_dma_chan *edmac = dev_id;
- struct ep93xx_dma_desc *desc;
irqreturn_t ret = IRQ_HANDLED;
spin_lock(&edmac->lock);
- desc = ep93xx_dma_get_active(edmac);
- if (!desc) {
- dev_warn(chan2dev(edmac),
- "got interrupt while active list is empty\n");
- spin_unlock(&edmac->lock);
- return IRQ_NONE;
- }
-
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
- desc->complete = true;
+ ep93xx_dma_get_active(edmac)->complete = true;
tasklet_schedule(&edmac->tasklet);
break;
@@ -839,8 +803,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_MEM_TO_DEV &&
- data->direction != DMA_DEV_TO_MEM)
+ if (data->direction != DMA_TO_DEVICE &&
+ data->direction != DMA_FROM_DEVICE)
return -EINVAL;
break;
default:
@@ -988,7 +952,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned int sg_len, enum dma_data_direction dir,
unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -1024,7 +988,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto fail;
}
- if (dir == DMA_MEM_TO_DEV) {
+ if (dir == DMA_TO_DEVICE) {
desc->src_addr = sg_dma_address(sg);
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1068,7 +1032,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir)
+ enum dma_data_direction dir)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1101,7 +1065,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
goto fail;
}
- if (dir == DMA_MEM_TO_DEV) {
+ if (dir == DMA_TO_DEVICE) {
desc->src_addr = dma_addr + offset;
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1169,12 +1133,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
return -EINVAL;
switch (config->direction) {
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
width = config->src_addr_width;
addr = config->src_addr;
break;
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
width = config->dst_addr_width;
addr = config->dst_addr;
break;
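The ep93xx hunks above drop the list_empty() guard from ep93xx_dma_get_active() and the NULL checks in its callers, so the accessor goes back to assuming the active list is never empty. A tiny sketch of the guarded-accessor idea being removed, using a plain pointer instead of the kernel's list_head API (all names here are invented):

#include <stddef.h>
#include <stdio.h>

struct desc {
	int cookie;
	struct desc *next;
};

struct chan {
	struct desc *active;	/* head of the active descriptor list */
};

/* Return the first active descriptor, or NULL when the list is empty. */
static struct desc *get_active(const struct chan *c)
{
	return c->active;
}

int main(void)
{
	struct desc d = { .cookie = 42, .next = NULL };
	struct chan c = { .active = NULL };
	struct desc *first;

	if (!get_active(&c))
		printf("no active descriptor, nothing to do\n");

	c.active = &d;
	first = get_active(&c);
	if (first)
		printf("active cookie %d\n", first->cookie);
	return 0;
}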
diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c
index b98070c33ca9..8a781540590c 100644
--- a/trunk/drivers/dma/fsldma.c
+++ b/trunk/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_data_direction direction, unsigned long flags)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
return -ENXIO;
/* we set the controller burst size depending on direction */
- if (config->direction == DMA_MEM_TO_DEV)
+ if (config->direction == DMA_TO_DEVICE)
size = config->dst_addr_width * config->dst_maxburst;
else
size = config->src_addr_width * config->src_maxburst;
diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c
index e4383ee2c9ac..4be55f9bb6c1 100644
--- a/trunk/drivers/dma/imx-dma.c
+++ b/trunk/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg->length;
}
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
diff --git a/trunk/drivers/dma/imx-sdma.c b/trunk/drivers/dma/imx-sdma.c
index a8af379680c1..f993955a640c 100644
--- a/trunk/drivers/dma/imx-sdma.c
+++ b/trunk/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
struct sdma_channel {
struct sdma_engine *sdma;
unsigned int channel;
- enum dma_transfer_direction direction;
+ enum dma_data_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
@@ -268,8 +268,6 @@ struct sdma_channel {
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
- unsigned int chn_count;
- unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)
@@ -505,7 +503,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd;
int i, error = 0;
- sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
@@ -515,7 +512,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
- sdmac->chn_real_count += bd->mode.count;
}
if (error)
@@ -523,9 +519,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
- sdmac->last_completed = sdmac->desc.cookie;
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
+ sdmac->last_completed = sdmac->desc.cookie;
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -654,7 +650,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
- if (sdmac->direction == DMA_DEV_TO_MEM) {
+ if (sdmac->direction == DMA_FROM_DEVICE) {
load_address = sdmac->pc_from_device;
} else {
load_address = sdmac->pc_to_device;
@@ -836,18 +832,17 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
- spin_lock_irqsave(&sdmac->lock, flags);
+ spin_lock_irq(&sdmac->lock);
cookie = sdma_assign_cookie(sdmac);
sdma_enable_channel(sdma, sdmac->channel);
- spin_unlock_irqrestore(&sdmac->lock, flags);
+ spin_unlock_irq(&sdmac->lock);
return cookie;
}
@@ -916,7 +911,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -946,7 +941,6 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
goto err_out;
}
- sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
int param;
@@ -963,7 +957,6 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
}
bd->mode.count = count;
- sdmac->chn_count += count;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
ret = -EINVAL;
@@ -1015,7 +1008,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1100,7 +1093,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdma_disable_channel(sdmac);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
sdmac->per_address = dmaengine_cfg->src_addr;
sdmac->watermark_level = dmaengine_cfg->src_maxburst;
sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1109,7 +1102,6 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
}
- sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(sdmac);
default:
return -ENOSYS;
@@ -1127,8 +1119,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used,
- sdmac->chn_count - sdmac->chn_real_count);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
return sdmac->status;
}
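The imx-sdma hunks remove the chn_count/chn_real_count bookkeeping that fed the residue argument of dma_set_tx_state(), reporting a residue of 0 instead. The idea being dropped is simply "bytes submitted minus bytes the hardware has completed"; a sketch with placeholder names:

#include <stdio.h>

struct chan_stats {
	unsigned int submitted;	/* bytes queued when descriptors were prepared */
	unsigned int completed;	/* bytes found done when the interrupt fired   */
};

static unsigned int residue(const struct chan_stats *s)
{
	return s->submitted - s->completed;
}

int main(void)
{
	struct chan_stats s = { .submitted = 4096, .completed = 3072 };

	printf("residue: %u bytes still pending\n", residue(&s));
	return 0;
}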
diff --git a/trunk/drivers/dma/intel_mid_dma.c b/trunk/drivers/dma/intel_mid_dma.c
index 74f70aadf9e4..19a0c64d45d3 100644
--- a/trunk/drivers/dma/intel_mid_dma.c
+++ b/trunk/drivers/dma/intel_mid_dma.c
@@ -280,8 +280,7 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
* callbacks but must be called with the lock held.
*/
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
- struct intel_mid_dma_desc *desc)
- __releases(&midc->lock) __acquires(&midc->lock)
+ struct intel_mid_dma_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
@@ -312,7 +311,6 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
- desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
midc->busy = false;
@@ -397,10 +395,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
- if (desc->dirn == DMA_MEM_TO_DEV) {
+ if (desc->dirn == DMA_TO_DEVICE) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
- } else if (desc->dirn == DMA_DEV_TO_MEM) {
+ } else if (desc->dirn == DMA_FROM_DEVICE) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
@@ -492,9 +490,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
- spin_unlock_bh(&midc->lock);
last_complete = midc->completed;
last_used = chan->cookie;
@@ -570,7 +566,6 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
- desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
}
@@ -637,13 +632,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -687,11 +682,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
+ if (mids->dma_slave.direction == DMA_TO_DEVICE) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
+ } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -737,7 +732,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
@@ -873,7 +868,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(&mid->pdev->dev)) {
+ if (dma_resume(mid->pdev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
@@ -1104,8 +1099,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
LNW_PERIPHRAL_MASK_SIZE);
if (dma->mask_reg == NULL) {
pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
- err = -ENOMEM;
- goto err_ioremap;
+ return -ENOMEM;
}
} else
dma->mask_reg = NULL;
@@ -1202,9 +1196,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
err_engine:
free_irq(pdev->irq, dma);
err_irq:
- if (dma->mask_reg)
- iounmap(dma->mask_reg);
-err_ioremap:
pci_pool_destroy(dma->dma_pool);
err_dma_pool:
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1346,9 +1337,8 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
*
* This function is called by OS when a power event occurs
*/
-static int dma_suspend(struct device *dev)
+int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
- struct pci_dev *pci = to_pci_dev(dev);
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
@@ -1372,9 +1362,8 @@ static int dma_suspend(struct device *dev)
*
* This function is called by OS when a power event occurs
*/
-int dma_resume(struct device *dev)
+int dma_resume(struct pci_dev *pci)
{
- struct pci_dev *pci = to_pci_dev(dev);
int ret;
struct middma_device *device = pci_get_drvdata(pci);
@@ -1440,8 +1429,6 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
- .suspend = dma_suspend,
- .resume = dma_resume,
};
static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1450,6 +1437,8 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
+ .suspend = dma_suspend,
+ .resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
diff --git a/trunk/drivers/dma/intel_mid_dma_regs.h b/trunk/drivers/dma/intel_mid_dma_regs.h
index c83d35b97bd8..aea5ee88ce03 100644
--- a/trunk/drivers/dma/intel_mid_dma_regs.h
+++ b/trunk/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next;
- enum dma_transfer_direction dirn;
+ enum dma_data_direction dirn;
enum dma_status status;
enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
}
-int dma_resume(struct device *dev);
+int dma_resume(struct pci_dev *pci);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/trunk/drivers/dma/iop-adma.c b/trunk/drivers/dma/iop-adma.c
index 04be90b645b8..e03f811a83dd 100644
--- a/trunk/drivers/dma/iop-adma.c
+++ b/trunk/drivers/dma/iop-adma.c
@@ -1735,6 +1735,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
+MODULE_ALIAS("platform:iop-adma");
+
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = __devexit_p(iop_adma_remove),
@@ -1744,9 +1746,19 @@ static struct platform_driver iop_adma_driver = {
},
};
-module_platform_driver(iop_adma_driver);
+static int __init iop_adma_init (void)
+{
+ return platform_driver_register(&iop_adma_driver);
+}
+
+static void __exit iop_adma_exit (void)
+{
+ platform_driver_unregister(&iop_adma_driver);
+ return;
+}
+module_exit(iop_adma_exit);
+module_init(iop_adma_init);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:iop-adma");
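The iop-adma hunk open-codes what a module_*_driver() helper macro would otherwise generate: an init function that registers the driver and an exit function that unregisters it. A userspace analogue of that boilerplate-generating macro, with invented register/unregister helpers:

#include <stdio.h>

struct driver {
	const char *name;
};

static int driver_register(struct driver *d)
{
	printf("register %s\n", d->name);
	return 0;
}

static void driver_unregister(struct driver *d)
{
	printf("unregister %s\n", d->name);
}

/* Generates the init/exit pair that the one-line macro would hide. */
#define MODULE_DRIVER(drv)				\
static int drv##_init(void)				\
{							\
	return driver_register(&drv);			\
}							\
static void drv##_exit(void)				\
{							\
	driver_unregister(&drv);			\
}

static struct driver demo_driver = { .name = "demo" };
MODULE_DRIVER(demo_driver)

int main(void)
{
	if (demo_driver_init())
		return 1;
	demo_driver_exit();
	return 0;
}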
diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c
index 6212b16e8cf2..0e5ef33f90a1 100644
--- a/trunk/drivers/dma/ipu/ipu_idmac.c
+++ b/trunk/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
- params->ip.npb = 15;
+ params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,6 +422,12 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->pp.nsb = 1;
}
+static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
+ uint16_t burst_pixels)
+{
+ params->pp.npb = burst_pixels - 1;
+}
+
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
@@ -684,6 +690,23 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
+ /* Some channels (rotation) have restriction on burst length */
+ switch (channel) {
+ case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
+ invalid - Table 44-30 */
+/*
+ ipu_ch_param_set_burst_size(&params, 8);
+ */
+ break;
+ case IDMAC_SDC_0:
+ case IDMAC_SDC_1:
+ /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
+ ipu_ch_param_set_burst_size(&params, 16);
+ break;
+ case IDMAC_IC_0:
+ default:
+ break;
+ }
spin_lock_irqsave(&ipu->lock, flags);
@@ -1341,7 +1364,7 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long tx_flags)
+ enum dma_data_direction direction, unsigned long tx_flags)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1353,7 +1376,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
+ if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/trunk/drivers/dma/mpc512x_dma.c b/trunk/drivers/dma/mpc512x_dma.c
index 4d6d4cf66949..8ba4edc6185e 100644
--- a/trunk/drivers/dma/mpc512x_dma.c
+++ b/trunk/drivers/dma/mpc512x_dma.c
@@ -835,7 +835,17 @@ static struct platform_driver mpc_dma_driver = {
},
};
-module_platform_driver(mpc_dma_driver);
+static int __init mpc_dma_init(void)
+{
+ return platform_driver_register(&mpc_dma_driver);
+}
+module_init(mpc_dma_init);
+
+static void __exit mpc_dma_exit(void)
+{
+ platform_driver_unregister(&mpc_dma_driver);
+}
+module_exit(mpc_dma_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik ");
diff --git a/trunk/drivers/dma/mxs-dma.c b/trunk/drivers/dma/mxs-dma.c
index b06cd4ca626f..fc903c0ed234 100644
--- a/trunk/drivers/dma/mxs-dma.c
+++ b/trunk/drivers/dma/mxs-dma.c
@@ -44,6 +44,7 @@
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
+#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
@@ -110,7 +111,6 @@ struct mxs_dma_chan {
int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
- int desc_count;
dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+ int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+ /* enable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ }
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,6 +165,9 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
+ /* clkgate needs to be enabled before writing other registers */
+ mxs_dma_clkgate(mxs_chan, 1);
+
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -158,6 +178,9 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
+ /* disable apbh channel clock */
+ mxs_dma_clkgate(mxs_chan, 0);
+
mxs_chan->status = DMA_SUCCESS;
}
@@ -245,7 +268,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handle here in case of either it's (1) a bus
+ * an error we need to handler here in case of ether it's (1) an bus
* error or (2) a termination error with no completion.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -315,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto err_clk;
+ /* clkgate needs to be enabled for reset to finish */
+ mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -351,7 +377,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long append)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -360,7 +386,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
- int idx = append ? mxs_chan->desc_count : 0;
+ static int idx;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
@@ -391,7 +417,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
idx = 0;
}
- if (direction == DMA_TRANS_NONE) {
+ if (direction == DMA_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
@@ -424,7 +450,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
@@ -436,7 +462,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
}
}
}
- mxs_chan->desc_count = idx;
return &mxs_chan->desc;
@@ -447,7 +472,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -490,7 +515,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
+ ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
@@ -498,7 +523,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
i++;
}
- mxs_chan->desc_count = i;
return &mxs_chan->desc;
@@ -515,8 +539,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- mxs_dma_reset_chan(mxs_chan);
mxs_dma_disable_chan(mxs_chan);
+ mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -556,7 +580,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
- return ret;
+ goto err_out;
ret = mxs_reset_block(mxs_dma->base);
if (ret)
@@ -580,8 +604,11 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
-err_out:
clk_disable_unprepare(mxs_dma->clk);
+
+ return 0;
+
+err_out:
return ret;
}
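
The clkgate helper restored in this hunk relies on the i.MX23/28 register convention where each control register has SET and CLR shadow offsets (MXS_SET_ADDR and MXS_CLR_ADDR in the driver): writing a mask to the SET shadow sets those bits and writing it to the CLR shadow clears them. Enabling a channel clock therefore means clearing its CLKGATE bit through CLR, and disabling means setting it through SET. A minimal userspace model of that idiom follows; writel() is mocked as an array store, and the 0x4/0x8 shadow offsets are assumed to match the driver's MXS_SET_ADDR/MXS_CLR_ADDR.

#include <assert.h>
#include <stdint.h>

#define MXS_SET_ADDR			0x4
#define MXS_CLR_ADDR			0x8
#define BP_APBH_CTRL0_CLKGATE_CHANNEL	8

static uint32_t regs[64];		/* mock register file, word indexed */

/* Mock writel(): emulate the SET/CLR shadow registers in software. */
static void mock_writel(uint32_t val, unsigned int offset)
{
	unsigned int base = offset & ~0xcu;	/* strip the shadow offset */

	if ((offset & 0xc) == MXS_SET_ADDR)
		regs[base / 4] |= val;
	else if ((offset & 0xc) == MXS_CLR_ADDR)
		regs[base / 4] &= ~val;
	else
		regs[base / 4] = val;
}

/* As in mxs_dma_clkgate(): enabling the channel clock clears its CLKGATE
 * bit through the CLR shadow, disabling sets it through the SET shadow. */
static void clkgate(unsigned int hw_ctrl0, int chan_id, int enable)
{
	uint32_t bit = 1u << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL);

	mock_writel(bit, hw_ctrl0 + (enable ? MXS_CLR_ADDR : MXS_SET_ADDR));
}

int main(void)
{
	mock_writel(0xffffff00, 0x00);	/* pretend all gates start closed */
	clkgate(0x00, 3, 1);		/* enable channel 3 clock */
	assert(!(regs[0] & (1u << 11)));
	clkgate(0x00, 3, 0);		/* disable it again */
	assert(regs[0] & (1u << 11));
	return 0;
}
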
diff --git a/trunk/drivers/dma/pch_dma.c b/trunk/drivers/dma/pch_dma.c
index 823f58179f9d..a6d0e3dbed07 100644
--- a/trunk/drivers/dma/pch_dma.c
+++ b/trunk/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
struct pch_dma_chan {
struct dma_chan chan;
void __iomem *membase;
- enum dma_transfer_direction dir;
+ enum dma_data_direction dir;
struct tasklet_struct tasklet;
unsigned long err_status;
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
- if (pd_chan->dir == DMA_MEM_TO_DEV)
+ if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
- if (pd_chan->dir == DMA_MEM_TO_DEV)
+ if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_data_direction direction, unsigned long flags)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
reg = pd_slave->rx_reg;
- else if (direction == DMA_MEM_TO_DEV)
+ else if (direction == DMA_TO_DEVICE)
reg = pd_slave->tx_reg;
else
return NULL;
@@ -1018,8 +1018,6 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
-#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
-#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1032,8 +1030,6 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
- { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
- { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
{ 0, },
};
@@ -1061,7 +1057,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang ");
MODULE_LICENSE("GPL v2");
diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c
index b8ec03ee8e22..09adcfcd953e 100644
--- a/trunk/drivers/dma/pl330.c
+++ b/trunk/drivers/dma/pl330.c
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
- if (slave_config->direction == DMA_MEM_TO_DEV) {
+ if (slave_config->direction == DMA_TO_DEVICE) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
if (slave_config->dst_maxburst)
pch->burst_len = slave_config->dst_maxburst;
- } else if (slave_config->direction == DMA_DEV_TO_MEM) {
+ } else if (slave_config->direction == DMA_FROM_DEVICE) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_transfer_direction direction)
+ size_t period_len, enum dma_data_direction direction)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
switch (direction) {
- case DMA_MEM_TO_DEV:
+ case DMA_TO_DEVICE:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
- case DMA_DEV_TO_MEM:
+ case DMA_FROM_DEVICE:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
desc->req.rqtype = DEVTOMEM;
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flg)
{
struct dma_pl330_desc *first, *desc = NULL;
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
else
list_add_tail(&desc->node, &first->node);
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
desc->req.rqtype = MEMTODEV;
@@ -834,7 +834,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
-#ifndef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM_RUNTIME
+ /* to use the runtime PM helper functions */
+ pm_runtime_enable(&adev->dev);
+
+ /* enable the power domain */
+ if (pm_runtime_get_sync(&adev->dev)) {
+ dev_err(&adev->dev, "failed to get runtime pm\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+#else
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
@@ -967,7 +977,10 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifndef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_put(&adev->dev);
+ pm_runtime_disable(&adev->dev);
+#else
clk_disable(pdmac->clk);
#endif
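
With CONFIG_PM_RUNTIME set, the pl330 probe path above calls pm_runtime_enable() followed by pm_runtime_get_sync() so the power domain is up before the controller registers are touched, and remove() undoes both with pm_runtime_put() and pm_runtime_disable(); otherwise the driver falls back to a plain clk_enable()/clk_disable() pair. The toy model below captures only the balancing rule (each get matched by a put, each enable by a disable); the pm_* names here are mocked stand-ins, not the kernel API.

#include <assert.h>
#include <stdbool.h>

/* Mock of the runtime-PM usage count; real drivers use pm_runtime_*(). */
struct mock_dev {
	bool rpm_enabled;
	int usage;
	bool powered;
};

static void mock_pm_runtime_enable(struct mock_dev *d)  { d->rpm_enabled = true; }
static void mock_pm_runtime_disable(struct mock_dev *d) { d->rpm_enabled = false; }

static int mock_pm_runtime_get_sync(struct mock_dev *d)
{
	if (!d->rpm_enabled)
		return -1;		/* treated as a probe failure */
	if (++d->usage == 1)
		d->powered = true;	/* first user powers the domain on */
	return 0;
}

static void mock_pm_runtime_put(struct mock_dev *d)
{
	if (--d->usage == 0)
		d->powered = false;	/* last user lets it power down */
}

int main(void)
{
	struct mock_dev dev = { 0 };

	/* probe(): enable runtime PM, then hold a reference while active */
	mock_pm_runtime_enable(&dev);
	assert(mock_pm_runtime_get_sync(&dev) == 0 && dev.powered);

	/* remove(): drop the reference, then disable runtime PM */
	mock_pm_runtime_put(&dev);
	mock_pm_runtime_disable(&dev);
	assert(dev.usage == 0 && !dev.powered);
	return 0;
}
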
diff --git a/trunk/drivers/dma/shdma.c b/trunk/drivers/dma/shdma.c
index 54043cd831c8..81809c2b46ab 100644
--- a/trunk/drivers/dma/shdma.c
+++ b/trunk/drivers/dma/shdma.c
@@ -23,6 +23,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -56,15 +57,6 @@ static LIST_HEAD(sh_dmae_devices);
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->id].chclr_offset);
-}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -137,15 +129,6 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
- if (shdev->pdata->chclr_present) {
- int i;
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan)
- chclr_write(sh_chan, 0);
- }
- }
-
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
@@ -156,10 +139,6 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
- if (shdev->pdata->dmaor_init & ~dmaor)
- dev_warn(shdev->common.dev,
- "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
- dmaor, shdev->pdata->dmaor_init);
return 0;
}
@@ -280,6 +259,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -359,8 +340,6 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
sh_chan_xfer_ld_queue(sh_chan);
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
}
- } else {
- sh_chan->pm_state = DMAE_PM_PENDING;
}
spin_unlock_irq(&sh_chan->desc_lock);
@@ -500,19 +479,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
- * DMA_DEV_TO_MEM
+ * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
* @src: source DMA address, incremented when direction equals
- * DMA_MEM_TO_DEV
+ * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_MEM_TO_MEM for MEMCPY
+ * equals DMA_BIDIRECTIONAL for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_transfer_direction direction)
+ struct sh_desc **first, enum dma_data_direction direction)
{
struct sh_desc *new;
size_t copy_size;
@@ -552,9 +531,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
new->direction = direction;
*len -= copy_size;
- if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
+ if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
*src += copy_size;
- if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
*dest += copy_size;
return new;
@@ -567,12 +546,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_data_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -613,7 +592,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
@@ -667,13 +646,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_data_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -1017,7 +996,7 @@ static void dmae_do_tasklet(unsigned long data)
spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_DEV_TO_MEM &&
+ ((desc->direction == DMA_FROM_DEVICE &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1246,8 +1225,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, shdev);
- shdev->common.dev = &pdev->dev;
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1277,6 +1254,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_control = sh_dmae_control;
+ shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -1457,17 +1435,22 @@ static int sh_dmae_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan->descs_allocated)
+ sh_chan->pm_error = pm_runtime_put_sync(dev);
+ }
+
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i, ret;
-
- ret = sh_dmae_rst(shdev);
- if (ret < 0)
- dev_err(dev, "Failed to reset!\n");
+ int i;
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1476,6 +1459,9 @@ static int sh_dmae_resume(struct device *dev)
if (!sh_chan->descs_allocated)
continue;
+ if (!sh_chan->pm_error)
+ pm_runtime_get_sync(dev);
+
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
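
sh_dmae_add_desc() above splits a transfer into hardware-sized chunks and advances only the memory-side pointer between chunks: for DMA_TO_DEVICE the source walks through RAM while the destination stays on the FIFO register, for DMA_FROM_DEVICE it is the other way round, and the memcpy (bidirectional) case advances both. A small standalone sketch of that chunking rule, using stand-in direction values; the chunk size and addresses are made up.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };	/* stand-ins */

/* One chunk: advance the RAM-side address(es), keep the FIFO side fixed,
 * mirroring the *src/*dest updates at the end of sh_dmae_add_desc(). */
static size_t add_chunk(uint32_t *dest, uint32_t *src, size_t len,
			size_t max_chunk, enum dir d)
{
	size_t copy = len < max_chunk ? len : max_chunk;

	if (d == BIDIRECTIONAL || d == TO_DEVICE)
		*src += copy;
	if (d == BIDIRECTIONAL || d == FROM_DEVICE)
		*dest += copy;
	return copy;
}

int main(void)
{
	uint32_t src = 0x40000000, dst = 0xe6850024;	/* RAM -> FIFO reg */
	size_t left = 100;

	while (left)
		left -= add_chunk(&dst, &src, left, 32, TO_DEVICE);

	/* Source advanced by 100 bytes, FIFO address untouched. */
	printf("src=0x%x dst=0x%x\n", (unsigned)src, (unsigned)dst);
	return 0;
}
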
diff --git a/trunk/drivers/dma/sirf-dma.c b/trunk/drivers/dma/sirf-dma.c
deleted file mode 100644
index 2333810d1688..000000000000
--- a/trunk/drivers/dma/sirf-dma.c
+++ /dev/null
@@ -1,707 +0,0 @@
-/*
- * DMA controller driver for CSR SiRFprimaII
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define SIRFSOC_DMA_DESCRIPTORS 16
-#define SIRFSOC_DMA_CHANNELS 16
-
-#define SIRFSOC_DMA_CH_ADDR 0x00
-#define SIRFSOC_DMA_CH_XLEN 0x04
-#define SIRFSOC_DMA_CH_YLEN 0x08
-#define SIRFSOC_DMA_CH_CTRL 0x0C
-
-#define SIRFSOC_DMA_WIDTH_0 0x100
-#define SIRFSOC_DMA_CH_VALID 0x140
-#define SIRFSOC_DMA_CH_INT 0x144
-#define SIRFSOC_DMA_INT_EN 0x148
-#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
-
-#define SIRFSOC_DMA_MODE_CTRL_BIT 4
-#define SIRFSOC_DMA_DIR_CTRL_BIT 5
-
-/* xlen and dma_width register is in 4 bytes boundary */
-#define SIRFSOC_DMA_WORD_LEN 4
-
-struct sirfsoc_dma_desc {
- struct dma_async_tx_descriptor desc;
- struct list_head node;
-
- /* SiRFprimaII 2D-DMA parameters */
-
- int xlen; /* DMA xlen */
- int ylen; /* DMA ylen */
- int width; /* DMA width */
- int dir;
- bool cyclic; /* is loop DMA? */
- u32 addr; /* DMA buffer address */
-};
-
-struct sirfsoc_dma_chan {
- struct dma_chan chan;
- struct list_head free;
- struct list_head prepared;
- struct list_head queued;
- struct list_head active;
- struct list_head completed;
- dma_cookie_t completed_cookie;
- unsigned long happened_cyclic;
- unsigned long completed_cyclic;
-
- /* Lock for this structure */
- spinlock_t lock;
-
- int mode;
-};
-
-struct sirfsoc_dma {
- struct dma_device dma;
- struct tasklet_struct tasklet;
- struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
- void __iomem *base;
- int irq;
-};
-
-#define DRV_NAME "sirfsoc_dma"
-
-/* Convert struct dma_chan to struct sirfsoc_dma_chan */
-static inline
-struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct sirfsoc_dma_chan, chan);
-}
-
-/* Convert struct dma_chan to struct sirfsoc_dma */
-static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
- return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
-}
-
-/* Execute all queued DMA descriptors */
-static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- struct sirfsoc_dma_desc *sdesc = NULL;
-
- /*
- * lock has been held by functions calling this, so we don't hold
- * lock again
- */
-
- sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
- node);
- /* Move the first queued descriptor to active list */
- list_move_tail(&schan->queued, &schan->active);
-
- /* Start the DMA transfer */
- writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
- cid * 4);
- writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
- (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
- sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
- writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
- SIRFSOC_DMA_CH_XLEN);
- writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
- SIRFSOC_DMA_CH_YLEN);
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
- (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
-
- /*
- * writel has an implict memory write barrier to make sure data is
- * flushed into memory before starting DMA
- */
- writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
-
- if (sdesc->cyclic) {
- writel((1 << cid) | 1 << (cid + 16) |
- readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
- schan->happened_cyclic = schan->completed_cyclic = 0;
- }
-}
-
-/* Interrupt handler */
-static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
-{
- struct sirfsoc_dma *sdma = data;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc = NULL;
- u32 is;
- int ch;
-
- is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
- while ((ch = fls(is) - 1) >= 0) {
- is &= ~(1 << ch);
- writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
- schan = &sdma->channels[ch];
-
- spin_lock(&schan->lock);
-
- sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
- node);
- if (!sdesc->cyclic) {
- /* Execute queued descriptors */
- list_splice_tail_init(&schan->active, &schan->completed);
- if (!list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
- } else
- schan->happened_cyclic++;
-
- spin_unlock(&schan->lock);
- }
-
- /* Schedule tasklet */
- tasklet_schedule(&sdma->tasklet);
-
- return IRQ_HANDLED;
-}
-
-/* process completed descriptors */
-static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
-{
- dma_cookie_t last_cookie = 0;
- struct sirfsoc_dma_chan *schan;
- struct sirfsoc_dma_desc *sdesc;
- struct dma_async_tx_descriptor *desc;
- unsigned long flags;
- unsigned long happened_cyclic;
- LIST_HEAD(list);
- int i;
-
- for (i = 0; i < sdma->dma.chancnt; i++) {
- schan = &sdma->channels[i];
-
- /* Get all completed descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- if (!list_empty(&schan->completed)) {
- list_splice_tail_init(&schan->completed, &list);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Execute callbacks and run dependencies */
- list_for_each_entry(sdesc, &list, node) {
- desc = &sdesc->desc;
-
- if (desc->callback)
- desc->callback(desc->callback_param);
-
- last_cookie = desc->cookie;
- dma_run_dependencies(desc);
- }
-
- /* Free descriptors */
- spin_lock_irqsave(&schan->lock, flags);
- list_splice_tail_init(&list, &schan->free);
- schan->completed_cookie = last_cookie;
- spin_unlock_irqrestore(&schan->lock, flags);
- } else {
- /* for cyclic channel, desc is always in active list */
- sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
- node);
-
- if (!sdesc || (sdesc && !sdesc->cyclic)) {
- /* without active cyclic DMA */
- spin_unlock_irqrestore(&schan->lock, flags);
- continue;
- }
-
- /* cyclic DMA */
- happened_cyclic = schan->happened_cyclic;
- spin_unlock_irqrestore(&schan->lock, flags);
-
- desc = &sdesc->desc;
- while (happened_cyclic != schan->completed_cyclic) {
- if (desc->callback)
- desc->callback(desc->callback_param);
- schan->completed_cyclic++;
- }
- }
- }
-}
-
-/* DMA Tasklet */
-static void sirfsoc_dma_tasklet(unsigned long data)
-{
- struct sirfsoc_dma *sdma = (void *)data;
-
- sirfsoc_dma_process_completed(sdma);
-}
-
-/* Submit descriptor to hardware */
-static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- dma_cookie_t cookie;
-
- sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Move descriptor to queue */
- list_move_tail(&sdesc->node, &schan->queued);
-
- /* Update cookie */
- cookie = schan->chan.cookie + 1;
- if (cookie <= 0)
- cookie = 1;
-
- schan->chan.cookie = cookie;
- sdesc->desc.cookie = cookie;
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return cookie;
-}
-
-static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
- struct dma_slave_config *config)
-{
- unsigned long flags;
-
- if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
- return -EINVAL;
-
- spin_lock_irqsave(&schan->lock, flags);
- schan->mode = (config->src_maxburst == 4 ? 1 : 0);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
- int cid = schan->chan.chan_id;
- unsigned long flags;
-
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
- ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
- writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
-
- writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
- & ~((1 << cid) | 1 << (cid + 16)),
- sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
-
- spin_lock_irqsave(&schan->lock, flags);
- list_splice_tail_init(&schan->active, &schan->free);
- list_splice_tail_init(&schan->queued, &schan->free);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return 0;
-}
-
-static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
-{
- struct dma_slave_config *config;
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
-
- switch (cmd) {
- case DMA_TERMINATE_ALL:
- return sirfsoc_dma_terminate_all(schan);
- case DMA_SLAVE_CONFIG:
- config = (struct dma_slave_config *)arg;
- return sirfsoc_dma_slave_config(schan, config);
-
- default:
- break;
- }
-
- return -ENOSYS;
-}
-
-/* Alloc channel resources */
-static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc;
- unsigned long flags;
- LIST_HEAD(descs);
- int i;
-
- /* Alloc descriptors for this channel */
- for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
- sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
- if (!sdesc) {
- dev_notice(sdma->dma.dev, "Memory allocation error. "
- "Allocated only %u descriptors\n", i);
- break;
- }
-
- dma_async_tx_descriptor_init(&sdesc->desc, chan);
- sdesc->desc.flags = DMA_CTRL_ACK;
- sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
-
- list_add_tail(&sdesc->node, &descs);
- }
-
- /* Return error only if no descriptors were allocated */
- if (i == 0)
- return -ENOMEM;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- list_splice_tail_init(&descs, &schan->free);
- spin_unlock_irqrestore(&schan->lock, flags);
-
- return i;
-}
-
-/* Free channel resources */
-static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc, *tmp;
- unsigned long flags;
- LIST_HEAD(descs);
-
- spin_lock_irqsave(&schan->lock, flags);
-
- /* Channel must be idle */
- BUG_ON(!list_empty(&schan->prepared));
- BUG_ON(!list_empty(&schan->queued));
- BUG_ON(!list_empty(&schan->active));
- BUG_ON(!list_empty(&schan->completed));
-
- /* Move data */
- list_splice_tail_init(&schan->free, &descs);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-
- /* Free descriptors */
- list_for_each_entry_safe(sdesc, tmp, &descs, node)
- kfree(sdesc);
-}
-
-/* Send pending descriptor to hardware */
-static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&schan->lock, flags);
-
- if (list_empty(&schan->active) && !list_empty(&schan->queued))
- sirfsoc_dma_execute(schan);
-
- spin_unlock_irqrestore(&schan->lock, flags);
-}
-
-/* Check request completion status */
-static enum dma_status
-sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- unsigned long flags;
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
-
- spin_lock_irqsave(&schan->lock, flags);
- last_used = schan->chan.cookie;
- last_complete = schan->completed_cookie;
- spin_unlock_irqrestore(&schan->lock, flags);
-
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- return dma_async_is_complete(cookie, last_complete, last_used);
-}
-
-static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags)
-{
- struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
- int ret;
-
- if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) {
- ret = -EINVAL;
- goto err_dir;
- }
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc) {
- /* try to free completed descriptors */
- sirfsoc_dma_process_completed(sdma);
- ret = 0;
- goto no_desc;
- }
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
-
- /*
- * Number of chunks in a frame can only be 1 for prima2
- * and ylen (number of frame - 1) must be at least 0
- */
- if ((xt->frame_size == 1) && (xt->numf > 0)) {
- sdesc->cyclic = 0;
- sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
- sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
- SIRFSOC_DMA_WORD_LEN;
- sdesc->ylen = xt->numf - 1;
- if (xt->dir == DMA_MEM_TO_DEV) {
- sdesc->addr = xt->src_start;
- sdesc->dir = 1;
- } else {
- sdesc->addr = xt->dst_start;
- sdesc->dir = 0;
- }
-
- list_add_tail(&sdesc->node, &schan->prepared);
- } else {
- pr_err("sirfsoc DMA Invalid xfer\n");
- ret = -EINVAL;
- goto err_xfer;
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-err_xfer:
- spin_unlock_irqrestore(&schan->lock, iflags);
-no_desc:
-err_dir:
- return ERR_PTR(ret);
-}
-
-static struct dma_async_tx_descriptor *
-sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
- size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
-{
- struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
- struct sirfsoc_dma_desc *sdesc = NULL;
- unsigned long iflags;
-
- /*
- * we only support cycle transfer with 2 period
- * If the X-length is set to 0, it would be the loop mode.
- * The DMA address keeps increasing until reaching the end of a loop
- * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
- * the DMA address goes back to the beginning of this area.
- * In loop mode, the DMA data region is divided into two parts, BUFA
- * and BUFB. DMA controller generates interrupts twice in each loop:
- * when the DMA address reaches the end of BUFA or the end of the
- * BUFB
- */
- if (buf_len != 2 * period_len)
- return ERR_PTR(-EINVAL);
-
- /* Get free descriptor */
- spin_lock_irqsave(&schan->lock, iflags);
- if (!list_empty(&schan->free)) {
- sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
- node);
- list_del(&sdesc->node);
- }
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- if (!sdesc)
- return 0;
-
- /* Place descriptor in prepared list */
- spin_lock_irqsave(&schan->lock, iflags);
- sdesc->addr = addr;
- sdesc->cyclic = 1;
- sdesc->xlen = 0;
- sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
- sdesc->width = 1;
- list_add_tail(&sdesc->node, &schan->prepared);
- spin_unlock_irqrestore(&schan->lock, iflags);
-
- return &sdesc->desc;
-}
-
-/*
- * The DMA controller consists of 16 independent DMA channels.
- * Each channel is allocated to a different function
- */
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
-{
- unsigned int ch_nr = (unsigned int) chan_id;
-
- if (ch_nr == chan->chan_id +
- chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(sirfsoc_dma_filter_id);
-
-static int __devinit sirfsoc_dma_probe(struct platform_device *op)
-{
- struct device_node *dn = op->dev.of_node;
- struct device *dev = &op->dev;
- struct dma_device *dma;
- struct sirfsoc_dma *sdma;
- struct sirfsoc_dma_chan *schan;
- struct resource res;
- ulong regs_start, regs_size;
- u32 id;
- int ret, i;
-
- sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
- if (!sdma) {
- dev_err(dev, "Memory exhausted!\n");
- return -ENOMEM;
- }
-
- if (of_property_read_u32(dn, "cell-index", &id)) {
- dev_err(dev, "Fail to get DMAC index\n");
- ret = -ENODEV;
- goto free_mem;
- }
-
- sdma->irq = irq_of_parse_and_map(dn, 0);
- if (sdma->irq == NO_IRQ) {
- dev_err(dev, "Error mapping IRQ!\n");
- ret = -EINVAL;
- goto free_mem;
- }
-
- ret = of_address_to_resource(dn, 0, &res);
- if (ret) {
- dev_err(dev, "Error parsing memory region!\n");
- goto free_mem;
- }
-
- regs_start = res.start;
- regs_size = resource_size(&res);
-
- sdma->base = devm_ioremap(dev, regs_start, regs_size);
- if (!sdma->base) {
- dev_err(dev, "Error mapping memory region!\n");
- ret = -ENOMEM;
- goto irq_dispose;
- }
-
- ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
- sdma);
- if (ret) {
- dev_err(dev, "Error requesting IRQ!\n");
- ret = -EINVAL;
- goto unmap_mem;
- }
-
- dma = &sdma->dma;
- dma->dev = dev;
- dma->chancnt = SIRFSOC_DMA_CHANNELS;
-
- dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
- dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
- dma->device_issue_pending = sirfsoc_dma_issue_pending;
- dma->device_control = sirfsoc_dma_control;
- dma->device_tx_status = sirfsoc_dma_tx_status;
- dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
- dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
-
- INIT_LIST_HEAD(&dma->channels);
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_CYCLIC, dma->cap_mask);
- dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- for (i = 0; i < dma->chancnt; i++) {
- schan = &sdma->channels[i];
-
- schan->chan.device = dma;
- schan->chan.cookie = 1;
- schan->completed_cookie = schan->chan.cookie;
-
- INIT_LIST_HEAD(&schan->free);
- INIT_LIST_HEAD(&schan->prepared);
- INIT_LIST_HEAD(&schan->queued);
- INIT_LIST_HEAD(&schan->active);
- INIT_LIST_HEAD(&schan->completed);
-
- spin_lock_init(&schan->lock);
- list_add_tail(&schan->chan.device_node, &dma->channels);
- }
-
- tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
-
- /* Register DMA engine */
- dev_set_drvdata(dev, sdma);
- ret = dma_async_device_register(dma);
- if (ret)
- goto free_irq;
-
- dev_info(dev, "initialized SIRFSOC DMAC driver\n");
-
- return 0;
-
-free_irq:
- devm_free_irq(dev, sdma->irq, sdma);
-irq_dispose:
- irq_dispose_mapping(sdma->irq);
-unmap_mem:
- iounmap(sdma->base);
-free_mem:
- devm_kfree(dev, sdma);
- return ret;
-}
-
-static int __devexit sirfsoc_dma_remove(struct platform_device *op)
-{
- struct device *dev = &op->dev;
- struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
-
- dma_async_device_unregister(&sdma->dma);
- devm_free_irq(dev, sdma->irq, sdma);
- irq_dispose_mapping(sdma->irq);
- iounmap(sdma->base);
- devm_kfree(dev, sdma);
- return 0;
-}
-
-static struct of_device_id sirfsoc_dma_match[] = {
- { .compatible = "sirf,prima2-dmac", },
- {},
-};
-
-static struct platform_driver sirfsoc_dma_driver = {
- .probe = sirfsoc_dma_probe,
- .remove = __devexit_p(sirfsoc_dma_remove),
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .of_match_table = sirfsoc_dma_match,
- },
-};
-
-module_platform_driver(sirfsoc_dma_driver);
-
-MODULE_AUTHOR("Rongjun Ying , "
- "Barry Song ");
-MODULE_DESCRIPTION("SIRFSOC DMA control driver");
-MODULE_LICENSE("GPL v2");
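
sirfsoc_dma_tx_submit() in the driver deleted above uses the standard dmaengine cookie idiom: dma_cookie_t is a signed 32-bit counter, zero and negative values are reserved (unset and error codes), so the per-channel cookie increments monotonically and wraps back to 1 rather than going non-positive. A self-contained sketch of just that rule follows; the kernel code simply does cookie + 1 and relies on integer wrap, while the explicit INT_MAX check below keeps the sketch free of signed overflow.

#include <assert.h>
#include <limits.h>

typedef int dma_cookie_t;	/* matches the kernel typedef */

/* Assign the next cookie, skipping 0 and negative values, as in
 * sirfsoc_dma_tx_submit(). */
static dma_cookie_t next_cookie(dma_cookie_t last)
{
	dma_cookie_t cookie = (last == INT_MAX) ? INT_MIN : last + 1;

	if (cookie <= 0)	/* 0 and negatives are reserved */
		cookie = 1;
	return cookie;
}

int main(void)
{
	assert(next_cookie(1) == 2);
	assert(next_cookie(INT_MAX) == 1);	/* wraps back to 1 */
	return 0;
}
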
diff --git a/trunk/drivers/dma/ste_dma40.c b/trunk/drivers/dma/ste_dma40.c
index cc5ecbc067a3..13259cad0ceb 100644
--- a/trunk/drivers/dma/ste_dma40.c
+++ b/trunk/drivers/dma/ste_dma40.c
@@ -14,8 +14,6 @@
#include
#include
#include
-#include
-#include
#include
#include
@@ -34,9 +32,6 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
-/* Milliseconds */
-#define DMA40_AUTOSUSPEND_DELAY 100
-
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -67,55 +62,6 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
-/*
- * These are the registers that has to be saved and later restored
- * when the DMA hw is powered off.
- * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
- */
-static u32 d40_backup_regs[] = {
- D40_DREG_LCPA,
- D40_DREG_LCLA,
- D40_DREG_PRMSE,
- D40_DREG_PRMSO,
- D40_DREG_PRMOE,
- D40_DREG_PRMOO,
-};
-
-#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
-
-/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
-static u32 d40_backup_regs_v3[] = {
- D40_DREG_PSEG1,
- D40_DREG_PSEG2,
- D40_DREG_PSEG3,
- D40_DREG_PSEG4,
- D40_DREG_PCEG1,
- D40_DREG_PCEG2,
- D40_DREG_PCEG3,
- D40_DREG_PCEG4,
- D40_DREG_RSEG1,
- D40_DREG_RSEG2,
- D40_DREG_RSEG3,
- D40_DREG_RSEG4,
- D40_DREG_RCEG1,
- D40_DREG_RCEG2,
- D40_DREG_RCEG3,
- D40_DREG_RCEG4,
-};
-
-#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
-
-static u32 d40_backup_regs_chan[] = {
- D40_CHAN_REG_SSCFG,
- D40_CHAN_REG_SSELT,
- D40_CHAN_REG_SSPTR,
- D40_CHAN_REG_SSLNK,
- D40_CHAN_REG_SDCFG,
- D40_CHAN_REG_SDELT,
- D40_CHAN_REG_SDPTR,
- D40_CHAN_REG_SDLNK,
-};
-
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -150,7 +96,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * @cyclic: true if this is a cyclic job
+ * the previous one.
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -197,7 +143,6 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protection this entity.
- * @reserved: True if used by secure world or otherwise.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -207,7 +152,6 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
- bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -241,6 +185,7 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
+ * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -272,7 +217,7 @@ struct d40_chan {
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
- enum dma_transfer_direction runtime_direction;
+ enum dma_data_direction runtime_direction;
};
/**
@@ -296,7 +241,6 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
- * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -304,20 +248,12 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
- * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
- * @reg_val_backup: Here the values of some hardware registers are stored
- * before the DMA is powered off. They are restored when the power is back on.
- * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
- * later.
- * @reg_val_backup_chan: Backup data for standard channel parameter registers.
- * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
- * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -339,7 +275,6 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
- struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -347,11 +282,6 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
- u32 reg_val_backup[BACKUP_REGS_SZ];
- u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
- u32 *reg_val_backup_chan;
- u16 gcc_pwr_off_mask;
- bool initialized;
};
/**
@@ -549,14 +479,13 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
- }
}
if (!desc)
@@ -607,7 +536,6 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
- bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -680,16 +608,11 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- /*
- * Cache maintenance is not needed if lcla is
- * mapped in esram
- */
- if (!use_esram_lcla) {
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
- }
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -817,61 +740,7 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-
-#ifdef CONFIG_PM
-static void dma40_backup(void __iomem *baseaddr, u32 *backup,
- u32 *regaddr, int num, bool save)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- void __iomem *addr = baseaddr + regaddr[i];
-
- if (save)
- backup[i] = readl_relaxed(addr);
- else
- writel_relaxed(backup[i], addr);
- }
-}
-
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
- int i;
-
- /* Save/Restore channel specific registers */
- for (i = 0; i < base->num_phy_chans; i++) {
- void __iomem *addr;
- int idx;
-
- if (base->phy_res[i].reserved)
- continue;
-
- addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
- idx = i * ARRAY_SIZE(d40_backup_regs_chan);
-
- dma40_backup(addr, &base->reg_val_backup_chan[idx],
- d40_backup_regs_chan,
- ARRAY_SIZE(d40_backup_regs_chan),
- save);
- }
-
- /* Save/Restore global registers */
- dma40_backup(base->virtbase, base->reg_val_backup,
- d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
- save);
-
- /* Save/Restore registers only existing on dma40 v3 and later */
- if (base->rev >= 3)
- dma40_backup(base->virtbase, base->reg_val_backup_v3,
- d40_backup_regs_v3,
- ARRAY_SIZE(d40_backup_regs_v3),
- save);
-}
-#else
-static void d40_save_restore_registers(struct d40_base *base, bool save)
-{
-}
-#endif
+/* Support functions for logical channels */
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -1104,10 +973,6 @@ static void d40_config_write(struct d40_chan *d40c)
/* Set LIDX for lcla */
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
-
- /* Clear LNK which will be used by d40_chan_has_events() */
- writel(0, chanbase + D40_CHAN_REG_SSLNK);
- writel(0, chanbase + D40_CHAN_REG_SDLNK);
}
}
@@ -1148,7 +1013,6 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
- pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1161,8 +1025,7 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1176,7 +1039,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
- pm_runtime_get_sync(d40c->base->dev);
+
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1194,8 +1057,6 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1268,10 +1129,7 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- if (!d40c->busy)
- d40c->busy = true;
-
- pm_runtime_get_sync(d40c->base->dev);
+ d40c->busy = true;
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1332,8 +1190,6 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1549,16 +1405,11 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy,
- bool is_src, int log_event_line, bool is_log,
- bool *first_user)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+ int log_event_line, bool is_log)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
-
- *first_user = ((phy->allocated_src | phy->allocated_dst)
- == D40_ALLOC_FREE);
-
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1639,7 +1490,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
+static int d40_allocate_channel(struct d40_chan *d40c)
{
int dev_type;
int event_group;
@@ -1675,8 +1526,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log,
- first_phy_user))
+ 0, is_log))
goto found_phy;
}
} else
@@ -1686,8 +1536,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log,
- first_phy_user))
+ is_log))
goto found_phy;
}
}
@@ -1703,25 +1552,6 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
-
- if (d40c->dma_cfg.use_fixed_channel) {
- i = d40c->dma_cfg.phy_channel;
-
- if ((i != phy_num) && (i != phy_num + 1)) {
- dev_err(chan2dev(d40c),
- "invalid fixed phy channel %d\n", i);
- return -EINVAL;
- }
-
- if (d40_alloc_mask_set(&phys[i], is_src, event_line,
- is_log, first_phy_user))
- goto found_log;
-
- dev_err(chan2dev(d40c),
- "could not allocate fixed phy channel %d\n", i);
- return -EINVAL;
- }
-
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1730,15 +1560,13 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log,
- first_phy_user))
+ event_line, is_log))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log,
- first_phy_user))
+ event_line, is_log))
goto found_log;
}
}
@@ -1815,11 +1643,10 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
- pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- goto out;
+ return res;
}
if (chan_is_logical(d40c)) {
@@ -1837,11 +1664,13 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res)
+ if (res) {
chan_err(d40c,
"Executing RUN command\n");
+ return res;
+ }
}
- goto out;
+ return 0;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1851,23 +1680,13 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- goto out;
+ return res;
}
-
- if (d40c->busy) {
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
- }
-
- d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
-out:
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
- return res;
+ return 0;
}
static bool d40_is_paused(struct d40_chan *d40c)
@@ -2036,7 +1855,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
}
static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -2045,9 +1864,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
if (chan->runtime_addr)
return chan->runtime_addr;
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_MEM_TO_DEV)
+ else if (direction == DMA_TO_DEVICE)
addr = plat->dev_tx[cfg->dst_dev_type];
return addr;
@@ -2056,7 +1875,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long dma_flags)
+ enum dma_data_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
dma_addr_t src_dev_addr = 0;
@@ -2083,9 +1902,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (direction != DMA_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
- if (direction == DMA_DEV_TO_MEM)
+ if (direction == DMA_FROM_DEVICE)
src_dev_addr = dev_addr;
- else if (direction == DMA_MEM_TO_DEV)
+ else if (direction == DMA_TO_DEVICE)
dst_dev_addr = dev_addr;
}
@@ -2192,15 +2011,14 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
+ is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c, &is_free_phy);
+ err = d40_allocate_channel(d40c);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
- d40c->configured = false;
goto fail;
}
- pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2220,12 +2038,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
- dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
- chan_is_logical(d40c) ? "logical" : "physical",
- d40c->phy_chan->num,
- d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
-
-
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2234,8 +2046,6 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2298,10 +2108,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
- enum dma_transfer_direction direction,
+ enum dma_data_direction direction,
unsigned long dma_flags)
{
- if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
+ if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2310,7 +2120,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
+ enum dma_data_direction direction)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2459,7 +2269,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_addr_width = config->dst_addr_width;
dst_maxburst = config->dst_maxburst;
- if (config->direction == DMA_DEV_TO_MEM) {
+ if (config->direction == DMA_FROM_DEVICE) {
dma_addr_t dev_addr_rx =
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
@@ -2482,7 +2292,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (dst_maxburst == 0)
dst_maxburst = src_maxburst;
- } else if (config->direction == DMA_MEM_TO_DEV) {
+ } else if (config->direction == DMA_TO_DEVICE) {
dma_addr_t dev_addr_tx =
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
@@ -2547,7 +2357,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
"configured channel %s for %s, data width %d/%d, "
"maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
- (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
+ (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
src_addr_width, dst_addr_width,
src_maxburst, dst_maxburst);
@@ -2709,72 +2519,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
return err;
}
-/* Suspend resume functionality */
-#ifdef CONFIG_PM
-static int dma40_pm_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
- if (!pm_runtime_suspended(dev))
- return -EBUSY;
-
- if (base->lcpa_regulator)
- ret = regulator_disable(base->lcpa_regulator);
- return ret;
-}
-
-static int dma40_runtime_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
-
- d40_save_restore_registers(base, true);
-
- /* Don't disable/enable clocks for v1 due to HW bugs */
- if (base->rev != 1)
- writel_relaxed(base->gcc_pwr_off_mask,
- base->virtbase + D40_DREG_GCC);
-
- return 0;
-}
-
-static int dma40_runtime_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
-
- if (base->initialized)
- d40_save_restore_registers(base, false);
-
- writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
- base->virtbase + D40_DREG_GCC);
- return 0;
-}
-
-static int dma40_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct d40_base *base = platform_get_drvdata(pdev);
- int ret = 0;
-
- if (base->lcpa_regulator)
- ret = regulator_enable(base->lcpa_regulator);
-
- return ret;
-}
-
-static const struct dev_pm_ops dma40_pm_ops = {
- .suspend = dma40_pm_suspend,
- .runtime_suspend = dma40_runtime_suspend,
- .runtime_resume = dma40_runtime_resume,
- .resume = dma40_resume,
-};
-#define DMA40_PM_OPS (&dma40_pm_ops)
-#else
-#define DMA40_PM_OPS NULL
-#endif
-
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2783,7 +2527,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
- int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2795,17 +2538,9 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
- base->phy_res[i].reserved = true;
- gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
- D40_DREG_GCC_SRC);
- gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
- D40_DREG_GCC_DST);
-
-
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
- base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2817,11 +2552,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
- base->phy_res[chan].reserved = true;
- gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
- D40_DREG_GCC_SRC);
- gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
- D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2842,15 +2572,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
- /*
- * To keep things simple, Enable all clocks initially.
- * The clocks will get managed later post channel allocation.
- * The clocks for the event lines on which reserved channels exists
- * are not managed here.
- */
- writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
- base->gcc_pwr_off_mask = gcc;
-
return num_phy_chans_avail;
}
@@ -2978,15 +2699,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
- sizeof(d40_backup_regs_chan),
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
+ sizeof(struct d40_desc *) *
+ D40_LCLA_LINK_PER_EVENT_GRP,
GFP_KERNEL);
- if (!base->reg_val_backup_chan)
- goto failure;
-
- base->lcla_pool.alloc_map =
- kzalloc(num_phy_chans * sizeof(struct d40_desc *)
- * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -3025,9 +2741,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
static void __init d40_hw_init(struct d40_base *base)
{
- static struct d40_reg_val dma_init_reg[] = {
+ static const struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
+ { .reg = D40_DREG_GCC, .val = 0x0000ff01},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -3227,31 +2943,11 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
- /* If lcla has to be located in ESRAM we don't need to allocate */
- if (base->plat_data->use_esram_lcla) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "lcla_esram");
- if (!res) {
- ret = -ENOENT;
- d40_err(&pdev->dev,
- "No \"lcla_esram\" memory resource\n");
- goto failure;
- }
- base->lcla_pool.base = ioremap(res->start,
- resource_size(res));
- if (!base->lcla_pool.base) {
- ret = -ENOMEM;
- d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
- goto failure;
- }
- writel(res->start, base->virtbase + D40_DREG_LCLA);
- } else {
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
- }
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
}
spin_lock_init(&base->lcla_pool.lock);
@@ -3264,32 +2960,6 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
- pm_runtime_irq_safe(base->dev);
- pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(base->dev);
- pm_runtime_enable(base->dev);
- pm_runtime_resume(base->dev);
-
- if (base->plat_data->use_esram_lcla) {
-
- base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
- if (IS_ERR(base->lcpa_regulator)) {
- d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
- base->lcpa_regulator = NULL;
- goto failure;
- }
-
- ret = regulator_enable(base->lcpa_regulator);
- if (ret) {
- d40_err(&pdev->dev,
- "Failed to enable lcpa_regulator\n");
- regulator_put(base->lcpa_regulator);
- base->lcpa_regulator = NULL;
- goto failure;
- }
- }
-
- base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -3306,11 +2976,6 @@ static int __init d40_probe(struct platform_device *pdev)
if (base->virtbase)
iounmap(base->virtbase);
- if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
- iounmap(base->lcla_pool.base);
- base->lcla_pool.base = NULL;
- }
-
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -3333,11 +2998,6 @@ static int __init d40_probe(struct platform_device *pdev)
clk_put(base->clk);
}
- if (base->lcpa_regulator) {
- regulator_disable(base->lcpa_regulator);
- regulator_put(base->lcpa_regulator);
- }
-
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3353,7 +3013,6 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
- .pm = DMA40_PM_OPS,
},
};
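For orientation (not part of the patch): the hunks above swap the dmaengine enum dma_transfer_direction values (DMA_MEM_TO_DEV, DMA_DEV_TO_MEM) back to the older DMA-API enum dma_data_direction values (DMA_TO_DEVICE, DMA_FROM_DEVICE). A minimal sketch of the correspondence a slave-DMA driver relies on; the helper name below is invented for illustration and does not appear in ste_dma40.c:

#include <linux/dmaengine.h>		/* enum dma_transfer_direction */
#include <linux/dma-direction.h>	/* enum dma_data_direction */

/* Illustrative helper only; not taken from the driver above. */
static enum dma_data_direction xfer_to_data_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:		/* memory -> peripheral, i.e. TX */
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_MEM:		/* peripheral -> memory, i.e. RX */
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}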
diff --git a/trunk/drivers/dma/ste_dma40_ll.h b/trunk/drivers/dma/ste_dma40_ll.h
index 8d3d490968a3..b44c455158de 100644
--- a/trunk/drivers/dma/ste_dma40_ll.h
+++ b/trunk/drivers/dma/ste_dma40_ll.h
@@ -16,8 +16,6 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
-#define D40_GROUP_SIZE 8
-#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -125,15 +123,6 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
-#define D40_DREG_GCC_ENA 0x1
-/* This assumes that there are only 4 event groups */
-#define D40_DREG_GCC_ENABLE_ALL 0xff01
-#define D40_DREG_GCC_EVTGRP_POS 8
-#define D40_DREG_GCC_SRC 0
-#define D40_DREG_GCC_DST 1
-#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
- (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
-
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
diff --git a/trunk/drivers/dma/timb_dma.c b/trunk/drivers/dma/timb_dma.c
index a6f9c1684a0f..a4a398f2ef61 100644
--- a/trunk/drivers/dma/timb_dma.c
+++ b/trunk/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
- enum dma_transfer_direction direction;
+ enum dma_data_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
+ td_chan->direction);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- DMA_TO_DEVICE);
+ td_chan->direction);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
- if (td_chan->direction == DMA_DEV_TO_MEM) {
+ if (td_chan->direction == DMA_FROM_DEVICE) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
txd->cookie);
/* make sure to stop the transfer */
- if (td_chan->direction == DMA_DEV_TO_MEM)
+ if (td_chan->direction == DMA_FROM_DEVICE)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags)
+ enum dma_data_direction direction, unsigned long flags)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_MEM_TO_DEV);
+ td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
- td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
- DMA_MEM_TO_DEV;
+ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,7 +841,17 @@ static struct platform_driver td_driver = {
.remove = __exit_p(td_remove),
};
-module_platform_driver(td_driver);
+static int __init td_init(void)
+{
+ return platform_driver_register(&td_driver);
+}
+module_init(td_init);
+
+static void __exit td_exit(void)
+{
+ platform_driver_unregister(&td_driver);
+}
+module_exit(td_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
diff --git a/trunk/drivers/dma/txx9dmac.c b/trunk/drivers/dma/txx9dmac.c
index 6122c364cf11..cbd83e362b5e 100644
--- a/trunk/drivers/dma/txx9dmac.c
+++ b/trunk/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
BUG_ON(!ds || !ds->reg_width);
if (ds->tx_reg)
- BUG_ON(direction != DMA_MEM_TO_DEV);
+ BUG_ON(direction != DMA_TO_DEVICE);
else
- BUG_ON(direction != DMA_DEV_TO_MEM);
+ BUG_ON(direction != DMA_FROM_DEVICE);
if (unlikely(!sg_len))
return NULL;
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (__is_dmac64(ddev)) {
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
desc->hwdesc.SAR = mem;
desc->hwdesc.DAR = ds->tx_reg;
} else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc.CNTR = sg_dma_len(sg);
} else {
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
desc->hwdesc32.SAR = mem;
desc->hwdesc32.DAR = ds->tx_reg;
} else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc32.CNTR = sg_dma_len(sg);
}
- if (direction == DMA_MEM_TO_DEV) {
+ if (direction == DMA_TO_DEVICE) {
sai = ds->reg_width;
dai = 0;
} else {
diff --git a/trunk/drivers/i2c/busses/Kconfig b/trunk/drivers/i2c/busses/Kconfig
index 3101dd59e379..cbe7a2fb779f 100644
--- a/trunk/drivers/i2c/busses/Kconfig
+++ b/trunk/drivers/i2c/busses/Kconfig
@@ -682,19 +682,19 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
depends on PCI
help
This driver is for PCH(Platform controller Hub) I2C of EG20T which
is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH I2C bus device.
- This driver also can be used for LAPIS Semiconductor IOH(Input/
- Output Hub), ML7213, ML7223 and ML7831.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
- for MP(Media Phone) use and ML7831 IOH is for general purpose use.
- ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+ Output Hub), ML7213 and ML7223.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+ for MP(Media Phone) use.
+ ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223 is completely compatible for Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
diff --git a/trunk/drivers/i2c/busses/i2c-eg20t.c b/trunk/drivers/i2c/busses/i2c-eg20t.c
index ca8877641040..3ef3557b6e32 100644
--- a/trunk/drivers/i2c/busses/i2c-eg20t.c
+++ b/trunk/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -136,8 +136,7 @@
/*
Set the number of I2C instance max
Intel EG20T PCH : 1ch
-LAPIS Semiconductor ML7213 IOH : 2ch
-LAPIS Semiconductor ML7831 IOH : 1ch
+OKI SEMICONDUCTOR ML7213 IOH : 2ch
*/
#define PCH_I2C_MAX_DEV 2
@@ -181,17 +180,15 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
-/* Definition for ML7213 by LAPIS Semiconductor */
+/* Definition for ML7213 by OKI SEMICONDUCTOR */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
-#define PCI_DEVICE_ID_ML7831_I2C 0x8817
static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
- { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
@@ -246,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
if (pch_clk > PCH_MAX_CLK)
pch_clk = 62500;
- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
+ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
/* Set transfer speed in I2CBC */
iowrite32(pch_i2cbc, p + PCH_I2CBC);
@@ -921,9 +918,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
pch_i2c_init(&adap_info->pch_data[i]);
-
- pch_adap->nr = i;
- ret = i2c_add_numbered_adapter(pch_adap);
+ ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
goto err_add_adapter;
@@ -1063,8 +1058,8 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. ");
+MODULE_AUTHOR("Tomoya MORINAGA. ");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
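Side note (illustrative, not part of the patch): the probe hunk above reverts from static to dynamic I2C bus numbering. The contrast between the two core APIs, wrapped in an invented helper:

#include <linux/i2c.h>

/* register_pch_adapter() is a hypothetical wrapper, shown only to
 * contrast the two registration calls touched by the hunk above. */
static int register_pch_adapter(struct i2c_adapter *adap, int fixed_nr)
{
	if (fixed_nr >= 0) {
		adap->nr = fixed_nr;	/* caller fixes the bus number */
		return i2c_add_numbered_adapter(adap);
	}
	return i2c_add_adapter(adap);	/* core assigns a free bus number */
}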
diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c
index f713eac55047..fa23faa20f0e 100644
--- a/trunk/drivers/i2c/busses/i2c-omap.c
+++ b/trunk/drivers/i2c/busses/i2c-omap.c
@@ -37,9 +37,6 @@
#include
#include
#include
-#include
-#include
-#include
#include
#include
#include
@@ -185,9 +182,7 @@ struct omap_i2c_dev {
u32 latency; /* maximum mpu wkup latency */
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
- u32 speed; /* Speed of bus in kHz */
- u32 dtrev; /* extra revision from DT */
- u32 flags;
+ u32 speed; /* Speed of bus in Khz */
u16 cmd_err;
u8 *buf;
u8 *regs;
@@ -240,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_BUF_REG] = 0x94,
[OMAP_I2C_CNT_REG] = 0x98,
[OMAP_I2C_DATA_REG] = 0x9c,
- [OMAP_I2C_SYSC_REG] = 0x10,
+ [OMAP_I2C_SYSC_REG] = 0x20,
[OMAP_I2C_CON_REG] = 0xa4,
[OMAP_I2C_OA_REG] = 0xa8,
[OMAP_I2C_SA_REG] = 0xac,
@@ -271,7 +266,11 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdata = dev->dev->platform_data;
+
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -292,10 +291,13 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
+ struct omap_i2c_bus_platform_data *pdata;
u16 iv;
+ pdata = dev->dev->platform_data;
+
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -318,6 +320,9 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdata = dev->dev->platform_data;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
@@ -368,7 +373,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+ if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -392,7 +397,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -401,7 +406,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* The filter is iclk (fclk for HS) period.
*/
if (dev->speed > 400 ||
- dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+ pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -470,7 +475,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -479,7 +484,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -799,6 +804,9 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
+ struct omap_i2c_bus_platform_data *pdata;
+
+ pdata = dev->dev->platform_data;
if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
@@ -822,9 +830,11 @@ omap_i2c_isr(int this_irq, void *dev_id)
~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- if (stat & OMAP_I2C_STAT_NACK)
+ if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
-
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
+ OMAP_I2C_CON_STP);
+ }
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
@@ -865,7 +875,7 @@ omap_i2c_isr(int this_irq, void *dev_id)
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (dev->flags &
+ if (pdata->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
@@ -908,7 +918,7 @@ omap_i2c_isr(int this_irq, void *dev_id)
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (dev->flags &
+ if (pdata->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
@@ -955,32 +965,6 @@ static const struct i2c_algorithm omap_i2c_algo = {
.functionality = omap_i2c_func,
};
-#ifdef CONFIG_OF
-static struct omap_i2c_bus_platform_data omap3_pdata = {
- .rev = OMAP_I2C_IP_VERSION_1,
- .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
- OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
- OMAP_I2C_FLAG_BUS_SHIFT_2,
-};
-
-static struct omap_i2c_bus_platform_data omap4_pdata = {
- .rev = OMAP_I2C_IP_VERSION_2,
-};
-
-static const struct of_device_id omap_i2c_of_match[] = {
- {
- .compatible = "ti,omap4-i2c",
- .data = &omap4_pdata,
- },
- {
- .compatible = "ti,omap3-i2c",
- .data = &omap3_pdata,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
-#endif
-
static int __devinit
omap_i2c_probe(struct platform_device *pdev)
{
@@ -988,10 +972,9 @@ omap_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
- struct device_node *node = pdev->dev.of_node;
- const struct of_device_id *match;
irq_handler_t isr;
int r;
+ u32 speed = 0;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1018,24 +1001,15 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- match = of_match_device(omap_i2c_of_match, &pdev->dev);
- if (match) {
- u32 freq = 100000; /* default to 100000 Hz */
-
- pdata = match->data;
- dev->dtrev = pdata->rev;
- dev->flags = pdata->flags;
-
- of_property_read_u32(node, "clock-frequency", &freq);
- /* convert DT freq value in Hz into kHz for speed */
- dev->speed = freq / 1000;
- } else if (pdata != NULL) {
- dev->speed = pdata->clkrate;
- dev->flags = pdata->flags;
+ if (pdata != NULL) {
+ speed = pdata->clkrate;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- dev->dtrev = pdata->rev;
+ } else {
+ speed = 100; /* Default speed */
+ dev->set_mpu_wkup_lat = NULL;
}
+ dev->speed = speed;
dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->base = ioremap(mem->start, resource_size(mem));
@@ -1046,9 +1020,9 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+ dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
+ if (pdata->rev == OMAP_I2C_IP_VERSION_2)
dev->regs = (u8 *)reg_map_ip_v2;
else
dev->regs = (u8 *)reg_map_ip_v1;
@@ -1061,7 +1035,7 @@ omap_i2c_probe(struct platform_device *pdev)
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+ if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1084,7 +1058,7 @@ omap_i2c_probe(struct platform_device *pdev)
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
- (1000 * dev->speed / 8);
+ (1000 * speed / 8);
}
/* reset ASAP, clearing any IRQs */
@@ -1100,7 +1074,7 @@ omap_i2c_probe(struct platform_device *pdev)
}
dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
- dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
pm_runtime_put(dev->dev);
@@ -1111,7 +1085,6 @@ omap_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
@@ -1121,8 +1094,6 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
- of_i2c_register_devices(adap);
-
return 0;
err_free_irq:
@@ -1195,7 +1166,6 @@ static struct platform_driver omap_i2c_driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
.pm = OMAP_I2C_PM_OPS,
- .of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
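For context (a sketch under stated assumptions, not the driver's code): the device-tree path removed above reads the bus speed from the standard "clock-frequency" property, which is given in Hz and converted to kHz internally. A minimal illustration with an invented helper name:

#include <linux/of.h>
#include <linux/of_device.h>

/* example_bus_khz() is hypothetical; it only mirrors the pattern the
 * removed hunk used for picking the I2C bus speed from DT. */
static u32 example_bus_khz(struct device_node *node)
{
	u32 freq = 100000;	/* default 100 kHz, expressed in Hz */

	/* Leaves 'freq' untouched if the property is absent. */
	of_property_read_u32(node, "clock-frequency", &freq);
	return freq / 1000;	/* Hz -> kHz */
}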
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 20bce51c2e82..5d2f8e13cf0e 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
};
-static long get_driver_data(int cstate)
+static int get_driver_data(int cstate)
{
int driver_data;
switch (cstate) {
@@ -232,7 +232,6 @@ static long get_driver_data(int cstate)
* @drv: cpuidle driver
* @index: index of cpuidle state
*
- * Must be called under local_irq_disable().
*/
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -248,6 +247,8 @@ static int intel_idle(struct cpuidle_device *dev,
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+ local_irq_disable();
+
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -347,8 +348,7 @@ static int intel_idle_probe(void)
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
- !mwait_substates)
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else {
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+ smp_call_function(__setup_broadcast_timer, (void *)true, 1);
register_cpu_notifier(&setup_broadcast_notifier);
}
@@ -471,67 +471,71 @@ static int intel_idle_cpuidle_driver_init(void)
}
if (auto_demotion_disable_flags)
- on_each_cpu(auto_demotion_disable, NULL, 1);
+ smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
/*
- * intel_idle_cpu_init()
+ * intel_idle_cpuidle_devices_init()
* allocate, initialize, register cpuidle_devices
- * @cpu: cpu/core to initialize
*/
-int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpuidle_devices_init(void)
{
- int cstate;
+ int i, cstate;
struct cpuidle_device *dev;
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
- dev->state_count = 1;
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ dev->state_count = 1;
- if (cstate > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n",
- max_cstate);
- break;
- }
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
+
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (mwait_substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL) {
+ continue;
+ }
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ dev->states_usage[dev->state_count].driver_data =
+ (void *)get_driver_data(cstate);
dev->state_count += 1;
}
- dev->cpu = cpu;
- if (cpuidle_register_device(dev)) {
- pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
- intel_idle_cpuidle_devices_uninit();
- return -EIO;
+ dev->cpu = i;
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
+ i);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
+ }
}
- if (auto_demotion_disable_flags)
- smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
-
return 0;
}
static int __init intel_idle_init(void)
{
- int retval, i;
+ int retval;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -549,16 +553,10 @@ static int __init intel_idle_init(void)
return retval;
}
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_online_cpu(i) {
- retval = intel_idle_cpu_init(i);
- if (retval) {
- cpuidle_unregister_driver(&intel_idle_driver);
- return retval;
- }
+ retval = intel_idle_cpuidle_devices_init();
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
}
return 0;
@@ -570,7 +568,7 @@ static void __exit intel_idle_exit(void)
cpuidle_unregister_driver(&intel_idle_driver);
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
- on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
+ smp_call_function(__setup_broadcast_timer, (void *)false, 1);
unregister_cpu_notifier(&setup_broadcast_notifier);
}
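Aside (sketch only, function names invented): several hunks above swap on_each_cpu() back to smp_call_function(). The practical difference is whether the calling CPU runs the callback itself:

#include <linux/smp.h>

static void example_percpu_setup(void *info)
{
	/* per-CPU configuration (e.g. an MSR write) would go here */
}

static void example_configure(void)
{
	/* Runs the callback on every online CPU, including the caller. */
	on_each_cpu(example_percpu_setup, NULL, 1);

	/* Runs the callback on all *other* online CPUs; the caller must
	 * handle itself separately if it also needs the setup. */
	smp_call_function(example_percpu_setup, NULL, 1);
}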
diff --git a/trunk/drivers/infiniband/Kconfig b/trunk/drivers/infiniband/Kconfig
index eb0add311dc8..0f9a84c1046a 100644
--- a/trunk/drivers/infiniband/Kconfig
+++ b/trunk/drivers/infiniband/Kconfig
@@ -55,7 +55,6 @@ source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
-source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
diff --git a/trunk/drivers/infiniband/Makefile b/trunk/drivers/infiniband/Makefile
index a3b2d8eac86e..9cc7a47d3e67 100644
--- a/trunk/drivers/infiniband/Makefile
+++ b/trunk/drivers/infiniband/Makefile
@@ -10,5 +10,4 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
-obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/trunk/drivers/infiniband/ulp/srpt/Kconfig b/trunk/drivers/infiniband/ulp/srpt/Kconfig
deleted file mode 100644
index 31ee83d528d9..000000000000
--- a/trunk/drivers/infiniband/ulp/srpt/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config INFINIBAND_SRPT
- tristate "InfiniBand SCSI RDMA Protocol target support"
- depends on INFINIBAND && TARGET_CORE
- ---help---
-
- Support for the SCSI RDMA Protocol (SRP) Target driver. The
- SRP protocol is a protocol that allows an initiator to access
- a block storage device on another host (target) over a network
- that supports the RDMA protocol. Currently the RDMA protocol is
- supported by InfiniBand and by iWarp network hardware. More
- information about the SRP protocol can be found on the website
- of the INCITS T10 technical committee (http://www.t10.org/).
diff --git a/trunk/drivers/infiniband/ulp/srpt/Makefile b/trunk/drivers/infiniband/ulp/srpt/Makefile
deleted file mode 100644
index e3ee4bdfffa5..000000000000
--- a/trunk/drivers/infiniband/ulp/srpt/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-ccflags-y := -Idrivers/target
-obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h
deleted file mode 100644
index fb1de1f6f297..000000000000
--- a/trunk/drivers/infiniband/ulp/srpt/ib_dm_mad.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef IB_DM_MAD_H
-#define IB_DM_MAD_H
-
-#include
-
-#include
-
-enum {
- /*
- * See also section 13.4.7 Status Field, table 115 MAD Common Status
- * Field Bit Values and also section 16.3.1.1 Status Field in the
- * InfiniBand Architecture Specification.
- */
- DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
- DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
- DM_MAD_STATUS_INVALID_FIELD = 0x001c,
- DM_MAD_STATUS_NO_IOC = 0x0100,
-
- /*
- * See also the Device Management chapter, section 16.3.3 Attributes,
- * table 279 Device Management Attributes in the InfiniBand
- * Architecture Specification.
- */
- DM_ATTR_CLASS_PORT_INFO = 0x01,
- DM_ATTR_IOU_INFO = 0x10,
- DM_ATTR_IOC_PROFILE = 0x11,
- DM_ATTR_SVC_ENTRIES = 0x12
-};
-
-struct ib_dm_hdr {
- u8 reserved[28];
-};
-
-/*
- * Structure of management datagram sent by the SRP target implementation.
- * Contains a management datagram header, reliable multi-packet transaction
- * protocol (RMPP) header and ib_dm_hdr. Notes:
- * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
- * management datagrams.
- * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
- * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
- * - The maximum supported size for a management datagram when not using RMPP
- * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
- */
-struct ib_dm_mad {
- struct ib_mad_hdr mad_hdr;
- struct ib_rmpp_hdr rmpp_hdr;
- struct ib_dm_hdr dm_hdr;
- u8 data[IB_MGMT_DEVICE_DATA];
-};
-
-/*
- * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
- * Architecture Specification.
- */
-struct ib_dm_iou_info {
- __be16 change_id;
- u8 max_controllers;
- u8 op_rom;
- u8 controller_list[128];
-};
-
-/*
- * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of
- * the InfiniBand Architecture Specification.
- */
-struct ib_dm_ioc_profile {
- __be64 guid;
- __be32 vendor_id;
- __be32 device_id;
- __be16 device_version;
- __be16 reserved1;
- __be32 subsys_vendor_id;
- __be32 subsys_device_id;
- __be16 io_class;
- __be16 io_subclass;
- __be16 protocol;
- __be16 protocol_version;
- __be16 service_conn;
- __be16 initiators_supported;
- __be16 send_queue_depth;
- u8 reserved2;
- u8 rdma_read_depth;
- __be32 send_size;
- __be32 rdma_size;
- u8 op_cap_mask;
- u8 svc_cap_mask;
- u8 num_svc_entries;
- u8 reserved3[9];
- u8 id_string[64];
-};
-
-struct ib_dm_svc_entry {
- u8 name[40];
- __be64 id;
-};
-
-/*
- * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
- * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
- */
-struct ib_dm_svc_entries {
- struct ib_dm_svc_entry service_entries[4];
-};
-
-#endif
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c
deleted file mode 100644
index cd5d05e22a77..000000000000
--- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ /dev/null
@@ -1,4073 +0,0 @@
-/*
- * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
- * Copyright (C) 2008 - 2011 Bart Van Assche .
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "ib_srpt.h"
-
-/* Name of this kernel module. */
-#define DRV_NAME "ib_srpt"
-#define DRV_VERSION "2.0.0"
-#define DRV_RELDATE "2011-02-14"
-
-#define SRPT_ID_STRING "Linux SRP target"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRV_NAME " " fmt
-
-MODULE_AUTHOR("Vu Pham and Bart Van Assche");
-MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
- "v" DRV_VERSION " (" DRV_RELDATE ")");
-MODULE_LICENSE("Dual BSD/GPL");
-
-/*
- * Global Variables
- */
-
-static u64 srpt_service_guid;
-static spinlock_t srpt_dev_lock; /* Protects srpt_dev_list. */
-static struct list_head srpt_dev_list; /* List of srpt_device structures. */
-
-static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
-module_param(srp_max_req_size, int, 0444);
-MODULE_PARM_DESC(srp_max_req_size,
- "Maximum size of SRP request messages in bytes.");
-
-static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
-module_param(srpt_srq_size, int, 0444);
-MODULE_PARM_DESC(srpt_srq_size,
- "Shared receive queue (SRQ) size.");
-
-static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
-{
- return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
-}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
-MODULE_PARM_DESC(srpt_service_guid,
- "Using this value for ioc_guid, id_ext, and cm_listen_id"
- " instead of using the node_guid of the first HCA.");
-
-static struct ib_client srpt_client;
-static struct target_fabric_configfs *srpt_target;
-static void srpt_release_channel(struct srpt_rdma_ch *ch);
-static int srpt_queue_status(struct se_cmd *cmd);
-
-/**
- * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
- */
-static inline
-enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
- case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
- default: return dir;
- }
-}
-
-/**
- * srpt_sdev_name() - Return the name associated with the HCA.
- *
- * Examples are ib0, ib1, ...
- */
-static inline const char *srpt_sdev_name(struct srpt_device *sdev)
-{
- return sdev->device->name;
-}
-
-static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
-{
- unsigned long flags;
- enum rdma_ch_state state;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- state = ch->state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return state;
-}
-
-static enum rdma_ch_state
-srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
-{
- unsigned long flags;
- enum rdma_ch_state prev;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev = ch->state;
- ch->state = new_state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev;
-}
-
-/**
- * srpt_test_and_set_ch_state() - Test and set the channel state.
- *
- * Returns true if and only if the channel state has been set to the new state.
- */
-static bool
-srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
- enum rdma_ch_state new)
-{
- unsigned long flags;
- enum rdma_ch_state prev;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev = ch->state;
- if (prev == old)
- ch->state = new;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev == old;
-}
-
-/**
- * srpt_event_handler() - Asynchronous IB event callback function.
- *
- * Callback function called by the InfiniBand core when an asynchronous IB
- * event occurs. This callback may occur in interrupt context. See also
- * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
- * Architecture Specification.
- */
-static void srpt_event_handler(struct ib_event_handler *handler,
- struct ib_event *event)
-{
- struct srpt_device *sdev;
- struct srpt_port *sport;
-
- sdev = ib_get_client_data(event->device, &srpt_client);
- if (!sdev || sdev->device != event->device)
- return;
-
- pr_debug("ASYNC event= %d on device= %s\n", event->event,
- srpt_sdev_name(sdev));
-
- switch (event->event) {
- case IB_EVENT_PORT_ERR:
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
- sport->lid = 0;
- sport->sm_lid = 0;
- }
- break;
- case IB_EVENT_PORT_ACTIVE:
- case IB_EVENT_LID_CHANGE:
- case IB_EVENT_PKEY_CHANGE:
- case IB_EVENT_SM_CHANGE:
- case IB_EVENT_CLIENT_REREGISTER:
- /* Refresh port data asynchronously. */
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
- if (!sport->lid && !sport->sm_lid)
- schedule_work(&sport->work);
- }
- break;
- default:
- printk(KERN_ERR "received unrecognized IB event %d\n",
- event->event);
- break;
- }
-}
-
-/**
- * srpt_srq_event() - SRQ event callback function.
- */
-static void srpt_srq_event(struct ib_event *event, void *ctx)
-{
- printk(KERN_INFO "SRQ event %d\n", event->event);
-}
-
-/**
- * srpt_qp_event() - QP event callback function.
- */
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
-{
- pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
-
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- ib_cm_notify(ch->cm_id, event->event);
- break;
- case IB_EVENT_QP_LAST_WQE_REACHED:
- if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
- CH_RELEASING))
- srpt_release_channel(ch);
- else
- pr_debug("%s: state %d - ignored LAST_WQE.\n",
- ch->sess_name, srpt_get_ch_state(ch));
- break;
- default:
- printk(KERN_ERR "received unrecognized IB QP event %d\n",
- event->event);
- break;
- }
-}
-
-/**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
- * @slot: one-based slot number.
- * @value: four-bit value.
- *
- * Copies the lowest four bits of value in element slot of the array of four
- * bit elements called c_list (controller list). The index slot is one-based.
- */
-static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
-{
- u16 id;
- u8 tmp;
-
- id = (slot - 1) / 2;
- if (slot & 0x1) {
- tmp = c_list[id] & 0xf;
- c_list[id] = (value << 4) | tmp;
- } else {
- tmp = c_list[id] & 0xf0;
- c_list[id] = (value & 0xf) | tmp;
- }
-}
-
-/**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
- *
- * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
- * Specification.
- */
-static void srpt_get_class_port_info(struct ib_dm_mad *mad)
-{
- struct ib_class_port_info *cif;
-
- cif = (struct ib_class_port_info *)mad->data;
- memset(cif, 0, sizeof *cif);
- cif->base_version = 1;
- cif->class_version = 1;
- cif->resp_time_value = 20;
-
- mad->mad_hdr.status = 0;
-}
-
-/**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
- *
- * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
- * Specification. See also section B.7, table B.6 in the SRP r16a document.
- */
-static void srpt_get_iou(struct ib_dm_mad *mad)
-{
- struct ib_dm_iou_info *ioui;
- u8 slot;
- int i;
-
- ioui = (struct ib_dm_iou_info *)mad->data;
- ioui->change_id = __constant_cpu_to_be16(1);
- ioui->max_controllers = 16;
-
- /* set present for slot 1 and empty for the rest */
- srpt_set_ioc(ioui->controller_list, 1, 1);
- for (i = 1, slot = 2; i < 16; i++, slot++)
- srpt_set_ioc(ioui->controller_list, slot, 0);
-
- mad->mad_hdr.status = 0;
-}
-
-/**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
- *
- * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
- * Architecture Specification. See also section B.7, table B.7 in the SRP
- * r16a document.
- */
-static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
- struct ib_dm_mad *mad)
-{
- struct srpt_device *sdev = sport->sdev;
- struct ib_dm_ioc_profile *iocp;
-
- iocp = (struct ib_dm_ioc_profile *)mad->data;
-
- if (!slot || slot > 16) {
- mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
- return;
- }
-
- if (slot > 2) {
- mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
- return;
- }
-
- memset(iocp, 0, sizeof *iocp);
- strcpy(iocp->id_string, SRPT_ID_STRING);
- iocp->guid = cpu_to_be64(srpt_service_guid);
- iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
- iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
- iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
- iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
- iocp->subsys_device_id = 0x0;
- iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
- iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
- iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
- iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
- iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
- iocp->rdma_read_depth = 4;
- iocp->send_size = cpu_to_be32(srp_max_req_size);
- iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
- 1U << 24));
- iocp->num_svc_entries = 1;
- iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
- SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
-
- mad->mad_hdr.status = 0;
-}
-
-/**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
- *
- * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
- * Specification. See also section B.7, table B.8 in the SRP r16a document.
- */
-static void srpt_get_svc_entries(u64 ioc_guid,
- u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
-{
- struct ib_dm_svc_entries *svc_entries;
-
- WARN_ON(!ioc_guid);
-
- if (!slot || slot > 16) {
- mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
- return;
- }
-
- if (slot > 2 || lo > hi || hi > 1) {
- mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
- return;
- }
-
- svc_entries = (struct ib_dm_svc_entries *)mad->data;
- memset(svc_entries, 0, sizeof *svc_entries);
- svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
- snprintf(svc_entries->service_entries[0].name,
- sizeof(svc_entries->service_entries[0].name),
- "%s%016llx",
- SRP_SERVICE_NAME_PREFIX,
- ioc_guid);
-
- mad->mad_hdr.status = 0;
-}
-
-/**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
- * @rq_mad: received MAD.
- * @rsp_mad: response MAD.
- */
-static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
- struct ib_dm_mad *rsp_mad)
-{
- u16 attr_id;
- u32 slot;
- u8 hi, lo;
-
- attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
- switch (attr_id) {
- case DM_ATTR_CLASS_PORT_INFO:
- srpt_get_class_port_info(rsp_mad);
- break;
- case DM_ATTR_IOU_INFO:
- srpt_get_iou(rsp_mad);
- break;
- case DM_ATTR_IOC_PROFILE:
- slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
- srpt_get_ioc(sp, slot, rsp_mad);
- break;
- case DM_ATTR_SVC_ENTRIES:
- slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
- hi = (u8) ((slot >> 8) & 0xff);
- lo = (u8) (slot & 0xff);
- slot = (u16) ((slot >> 16) & 0xffff);
- srpt_get_svc_entries(srpt_service_guid,
- slot, hi, lo, rsp_mad);
- break;
- default:
- rsp_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
- break;
- }
-}
-
-/**
- * srpt_mad_send_handler() - Post MAD-send callback function.
- */
-static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_wc *mad_wc)
-{
- ib_destroy_ah(mad_wc->send_buf->ah);
- ib_free_send_mad(mad_wc->send_buf);
-}
-
-/**
- * srpt_mad_recv_handler() - MAD reception callback function.
- */
-static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
- struct ib_mad_recv_wc *mad_wc)
-{
- struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
- struct ib_ah *ah;
- struct ib_mad_send_buf *rsp;
- struct ib_dm_mad *dm_mad;
-
- if (!mad_wc || !mad_wc->recv_buf.mad)
- return;
-
- ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
- mad_wc->recv_buf.grh, mad_agent->port_num);
- if (IS_ERR(ah))
- goto err;
-
- BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
-
- rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
- mad_wc->wc->pkey_index, 0,
- IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
- GFP_KERNEL);
- if (IS_ERR(rsp))
- goto err_rsp;
-
- rsp->ah = ah;
-
- dm_mad = rsp->mad;
- memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
- dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
- dm_mad->mad_hdr.status = 0;
-
- switch (mad_wc->recv_buf.mad->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
- break;
- case IB_MGMT_METHOD_SET:
- dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
- break;
- default:
- dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
- break;
- }
-
- if (!ib_post_send_mad(rsp, NULL)) {
- ib_free_recv_mad(mad_wc);
- /* will destroy_ah & free_send_mad in send completion */
- return;
- }
-
- ib_free_send_mad(rsp);
-
-err_rsp:
- ib_destroy_ah(ah);
-err:
- ib_free_recv_mad(mad_wc);
-}
-
-/**
- * srpt_refresh_port() - Configure a HCA port.
- *
- * Enable InfiniBand management datagram processing, update the cached sm_lid,
- * lid and gid values, and register a callback function for processing MADs
- * on the specified port.
- *
- * Note: It is safe to call this function more than once for the same port.
- */
-static int srpt_refresh_port(struct srpt_port *sport)
-{
- struct ib_mad_reg_req reg_req;
- struct ib_port_modify port_modify;
- struct ib_port_attr port_attr;
- int ret;
-
- memset(&port_modify, 0, sizeof port_modify);
- port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- port_modify.clr_port_cap_mask = 0;
-
- ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
- if (ret)
- goto err_mod_port;
-
- ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
- if (ret)
- goto err_query_port;
-
- sport->sm_lid = port_attr.sm_lid;
- sport->lid = port_attr.lid;
-
- ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
- if (ret)
- goto err_query_port;
-
- if (!sport->mad_agent) {
- memset(&reg_req, 0, sizeof reg_req);
- reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
- reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
- set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
- set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
-
- sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
- sport->port,
- IB_QPT_GSI,
- &reg_req, 0,
- srpt_mad_send_handler,
- srpt_mad_recv_handler,
- sport);
- if (IS_ERR(sport->mad_agent)) {
- ret = PTR_ERR(sport->mad_agent);
- sport->mad_agent = NULL;
- goto err_query_port;
- }
- }
-
- return 0;
-
-err_query_port:
-
- port_modify.set_port_cap_mask = 0;
- port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
- return ret;
-}
-
-/**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
- *
- * Note: It is safe to call this function more than once for the same device.
- */
-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
-{
- struct ib_port_modify port_modify = {
- .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
- };
- struct srpt_port *sport;
- int i;
-
- for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
- sport = &sdev->port[i - 1];
- WARN_ON(sport->port != i);
- if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- printk(KERN_ERR "disabling MAD processing failed.\n");
- if (sport->mad_agent) {
- ib_unregister_mad_agent(sport->mad_agent);
- sport->mad_agent = NULL;
- }
- }
-}
-
-/**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
- */
-static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
- int ioctx_size, int dma_size,
- enum dma_data_direction dir)
-{
- struct srpt_ioctx *ioctx;
-
- ioctx = kmalloc(ioctx_size, GFP_KERNEL);
- if (!ioctx)
- goto err;
-
- ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
- if (!ioctx->buf)
- goto err_free_ioctx;
-
- ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
- if (ib_dma_mapping_error(sdev->device, ioctx->dma))
- goto err_free_buf;
-
- return ioctx;
-
-err_free_buf:
- kfree(ioctx->buf);
-err_free_ioctx:
- kfree(ioctx);
-err:
- return NULL;
-}
-
-/**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
- */
-static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
- int dma_size, enum dma_data_direction dir)
-{
- if (!ioctx)
- return;
-
- ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
- kfree(ioctx->buf);
- kfree(ioctx);
-}
-
-/**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
- * @sdev: Device to allocate the I/O context ring for.
- * @ring_size: Number of elements in the I/O context ring.
- * @ioctx_size: I/O context size.
- * @dma_size: DMA buffer size.
- * @dir: DMA data direction.
- */
-static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
- int ring_size, int ioctx_size,
- int dma_size, enum dma_data_direction dir)
-{
- struct srpt_ioctx **ring;
- int i;
-
- WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
- && ioctx_size != sizeof(struct srpt_send_ioctx));
-
- ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
- if (!ring)
- goto out;
- for (i = 0; i < ring_size; ++i) {
- ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
- if (!ring[i])
- goto err;
- ring[i]->index = i;
- }
- goto out;
-
-err:
- while (--i >= 0)
- srpt_free_ioctx(sdev, ring[i], dma_size, dir);
- kfree(ring);
-out:
- return ring;
-}
-
-/**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
- */
-static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
- struct srpt_device *sdev, int ring_size,
- int dma_size, enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < ring_size; ++i)
- srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
- kfree(ioctx_ring);
-}
-
-/**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return state;
-}
-
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
- *
- * Does not modify the state of aborted commands. Returns the previous command
- * state.
- */
-static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
- enum srpt_command_state new)
-{
- enum srpt_command_state previous;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- previous = ioctx->state;
- if (previous != SRPT_STATE_DONE)
- ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
-
- return previous;
-}
-
-/**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
- *
- * Returns true if and only if the previous command state was equal to 'old'.
- */
-static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
- enum srpt_command_state old,
- enum srpt_command_state new)
-{
- enum srpt_command_state previous;
- unsigned long flags;
-
- WARN_ON(!ioctx);
- WARN_ON(old == SRPT_STATE_DONE);
- WARN_ON(new == SRPT_STATE_NEW);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- previous = ioctx->state;
- if (previous == old)
- ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return previous == old;
-}
-
-/**
- * srpt_post_recv() - Post an IB receive request.
- */
-static int srpt_post_recv(struct srpt_device *sdev,
- struct srpt_recv_ioctx *ioctx)
-{
- struct ib_sge list;
- struct ib_recv_wr wr, *bad_wr;
-
- BUG_ON(!sdev);
- wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
-
- list.addr = ioctx->ioctx.dma;
- list.length = srp_max_req_size;
- list.lkey = sdev->mr->lkey;
-
- wr.next = NULL;
- wr.sg_list = &list;
- wr.num_sge = 1;
-
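-	/*
-	 * Receive work requests are posted on the device-wide shared
-	 * receive queue (SRQ) rather than on a per-channel receive queue.
-	 */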
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
-}
-
-/**
- * srpt_post_send() - Post an IB send request.
- *
- * Returns zero upon success and a non-zero value upon failure.
- */
-static int srpt_post_send(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx, int len)
-{
- struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
- struct srpt_device *sdev = ch->sport->sdev;
- int ret;
-
- atomic_inc(&ch->req_lim);
-
- ret = -ENOMEM;
- if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
- printk(KERN_WARNING "IB send queue full (needed 1)\n");
- goto out;
- }
-
- ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
- DMA_TO_DEVICE);
-
- list.addr = ioctx->ioctx.dma;
- list.length = len;
- list.lkey = sdev->mr->lkey;
-
- wr.next = NULL;
- wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(ch->qp, &wr, &bad_wr);
-
-out:
- if (ret < 0) {
- atomic_inc(&ch->sq_wr_avail);
- atomic_dec(&ch->req_lim);
- }
- return ret;
-}
-
-/**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
- * @ioctx: Pointer to the I/O context associated with the request.
- * @srp_cmd: Pointer to the SRP_CMD request data.
- * @dir: Pointer to the variable to which the transfer direction will be
- * written.
- * @data_len: Pointer to the variable to which the total data length of all
- * descriptors in the SRP_CMD request will be written.
- *
- * This function initializes ioctx->n_rbuf and ioctx->rbufs.
- *
- * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
- * -ENOMEM when memory allocation fails and zero upon success.
- */
-static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
- struct srp_cmd *srp_cmd,
- enum dma_data_direction *dir, u64 *data_len)
-{
- struct srp_indirect_buf *idb;
- struct srp_direct_buf *db;
- unsigned add_cdb_offset;
- int ret;
-
- /*
- * The pointer computations below will only be compiled correctly
- * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
- * whether srp_cmd::add_data has been declared as a byte pointer.
- */
- BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
- && !__same_type(srp_cmd->add_data[0], (u8)0));
-
- BUG_ON(!dir);
- BUG_ON(!data_len);
-
- ret = 0;
- *data_len = 0;
-
- /*
- * The lower four bits of the buffer format field contain the DATA-IN
- * buffer descriptor format, and the highest four bits contain the
- * DATA-OUT buffer descriptor format.
- */
- *dir = DMA_NONE;
- if (srp_cmd->buf_fmt & 0xf)
- /* DATA-IN: transfer data from target to initiator (read). */
- *dir = DMA_FROM_DEVICE;
- else if (srp_cmd->buf_fmt >> 4)
- /* DATA-OUT: transfer data from initiator to target (write). */
- *dir = DMA_TO_DEVICE;
-
- /*
- * According to the SRP spec, the lower two bits of the 'ADDITIONAL
- * CDB LENGTH' field are reserved and the size in bytes of this field
-	 * is four times the value specified in bits 2..7. Hence the "& ~3".
- */
- add_cdb_offset = srp_cmd->add_cdb_len & ~3;
- if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
- ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
- ioctx->n_rbuf = 1;
- ioctx->rbufs = &ioctx->single_rbuf;
-
- db = (struct srp_direct_buf *)(srp_cmd->add_data
- + add_cdb_offset);
- memcpy(ioctx->rbufs, db, sizeof *db);
- *data_len = be32_to_cpu(db->len);
- } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
- ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
- idb = (struct srp_indirect_buf *)(srp_cmd->add_data
- + add_cdb_offset);
-
- ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
-
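-		/*
-		 * Sanity check: the number of descriptors in the indirect
-		 * table must not exceed the descriptor counts announced in
-		 * the SRP_CMD header.
-		 */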
- if (ioctx->n_rbuf >
- (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
- printk(KERN_ERR "received unsupported SRP_CMD request"
- " type (%u out + %u in != %u / %zu)\n",
- srp_cmd->data_out_desc_cnt,
- srp_cmd->data_in_desc_cnt,
- be32_to_cpu(idb->table_desc.len),
- sizeof(*db));
- ioctx->n_rbuf = 0;
- ret = -EINVAL;
- goto out;
- }
-
- if (ioctx->n_rbuf == 1)
- ioctx->rbufs = &ioctx->single_rbuf;
- else {
- ioctx->rbufs =
- kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
- if (!ioctx->rbufs) {
- ioctx->n_rbuf = 0;
- ret = -ENOMEM;
- goto out;
- }
- }
-
- db = idb->desc_list;
- memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
- *data_len = be32_to_cpu(idb->len);
- }
-out:
- return ret;
-}
-
-/**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
- *
- * Initializes the attributes of queue pair 'qp' by allowing local write,
- * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
- */
-static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-{
- struct ib_qp_attr *attr;
- int ret;
-
- attr = kzalloc(sizeof *attr, GFP_KERNEL);
- if (!attr)
- return -ENOMEM;
-
- attr->qp_state = IB_QPS_INIT;
- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
- attr->port_num = ch->sport->port;
- attr->pkey_index = 0;
-
- ret = ib_modify_qp(qp, attr,
- IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
- IB_QP_PKEY_INDEX);
-
- kfree(attr);
- return ret;
-}
-
-/**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
- * @ch: channel of the queue pair.
- * @qp: queue pair to change the state of.
- *
- * Returns zero upon success and a negative value upon failure.
- *
- * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
- * If this structure ever becomes larger, it might be necessary to allocate
- * it dynamically instead of on the stack.
- */
-static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-{
- struct ib_qp_attr qp_attr;
- int attr_mask;
- int ret;
-
- qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
- if (ret)
- goto out;
-
- qp_attr.max_dest_rd_atomic = 4;
-
- ret = ib_modify_qp(qp, &qp_attr, attr_mask);
-
-out:
- return ret;
-}
-
-/**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
- * @ch: channel of the queue pair.
- * @qp: queue pair to change the state of.
- *
- * Returns zero upon success and a negative value upon failure.
- *
- * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
- * If this structure ever becomes larger, it might be necessary to allocate
- * it dynamically instead of on the stack.
- */
-static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
-{
- struct ib_qp_attr qp_attr;
- int attr_mask;
- int ret;
-
- qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
- if (ret)
- goto out;
-
- qp_attr.max_rd_atomic = 4;
-
- ret = ib_modify_qp(qp, &qp_attr, attr_mask);
-
-out:
- return ret;
-}
-
-/**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
- */
-static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
-{
- struct ib_qp_attr qp_attr;
-
- qp_attr.qp_state = IB_QPS_ERR;
- return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
-}
-
-/**
- * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
- */
-static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct scatterlist *sg;
- enum dma_data_direction dir;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
-
- while (ioctx->n_rdma)
- kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
-
- kfree(ioctx->rdma_ius);
- ioctx->rdma_ius = NULL;
-
- if (ioctx->mapped_sg_count) {
- sg = ioctx->sg;
- WARN_ON(!sg);
- dir = ioctx->cmd.data_direction;
- BUG_ON(dir == DMA_NONE);
- ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
- opposite_dma_dir(dir));
- ioctx->mapped_sg_count = 0;
- }
-}
-
-/**
- * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
- */
-static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct se_cmd *cmd;
- struct scatterlist *sg, *sg_orig;
- int sg_cnt;
- enum dma_data_direction dir;
- struct rdma_iu *riu;
- struct srp_direct_buf *db;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- u64 raddr;
- u32 rsize;
- u32 tsize;
- u32 dma_len;
- int count, nrdma;
- int i, j, k;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- cmd = &ioctx->cmd;
- dir = cmd->data_direction;
- BUG_ON(dir == DMA_NONE);
-
- transport_do_task_sg_chain(cmd);
- ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
- ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
-
- count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
- opposite_dma_dir(dir));
- if (unlikely(!count))
- return -EAGAIN;
-
- ioctx->mapped_sg_count = count;
-
- if (ioctx->rdma_ius && ioctx->n_rdma_ius)
- nrdma = ioctx->n_rdma_ius;
- else {
- nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
- + ioctx->n_rbuf;
-
- ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
- if (!ioctx->rdma_ius)
- goto free_mem;
-
- ioctx->n_rdma_ius = nrdma;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- dma_len = sg_dma_len(&sg[0]);
- riu = ioctx->rdma_ius;
-
-	/*
-	 * For each remote descriptor, calculate the number of ib_sge entries
-	 * needed. If that number does not exceed SRPT_DEF_SG_PER_WQE, a
-	 * single rdma_iu (and hence a single RDMA work request) is used for
-	 * the descriptor; otherwise additional rdma_iu entries are allocated
-	 * to carry the remaining ib_sge entries in extra RDMA work requests.
-	 */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- raddr = be64_to_cpu(db->va);
- riu->raddr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- riu->sge_cnt = 0;
-
- /* calculate how many sge required for this remote_buf */
- while (rsize > 0 && tsize > 0) {
-
- if (rsize >= dma_len) {
- tsize -= dma_len;
- rsize -= dma_len;
- raddr += dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = sg_dma_len(sg);
- }
- }
- } else {
- tsize -= rsize;
- dma_len -= rsize;
- rsize = 0;
- }
-
- ++riu->sge_cnt;
-
- if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
- ++ioctx->n_rdma;
- riu->sge =
- kmalloc(riu->sge_cnt * sizeof *riu->sge,
- GFP_KERNEL);
- if (!riu->sge)
- goto free_mem;
-
- ++riu;
- riu->sge_cnt = 0;
- riu->raddr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- }
- }
-
- ++ioctx->n_rdma;
- riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
- GFP_KERNEL);
- if (!riu->sge)
- goto free_mem;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- riu = ioctx->rdma_ius;
- sg = sg_orig;
- dma_len = sg_dma_len(&sg[0]);
- dma_addr = sg_dma_address(&sg[0]);
-
-	/* The second loop maps the scatterlist addresses to rdma_iu->ib_sge. */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- sge = riu->sge;
- k = 0;
-
- while (rsize > 0 && tsize > 0) {
- sge->addr = dma_addr;
- sge->lkey = ch->sport->sdev->mr->lkey;
-
- if (rsize >= dma_len) {
- sge->length =
- (tsize < dma_len) ? tsize : dma_len;
- tsize -= dma_len;
- rsize -= dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = sg_dma_len(sg);
- dma_addr = sg_dma_address(sg);
- }
- }
- } else {
- sge->length = (tsize < rsize) ? tsize : rsize;
- tsize -= rsize;
- dma_len -= rsize;
- dma_addr += rsize;
- rsize = 0;
- }
-
- ++k;
- if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
- ++riu;
- sge = riu->sge;
- k = 0;
- } else if (rsize > 0 && tsize > 0)
- ++sge;
- }
- }
-
- return 0;
-
-free_mem:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
-
- return -ENOMEM;
-}
-
-/**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
- */
-static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
-{
- struct srpt_send_ioctx *ioctx;
- unsigned long flags;
-
- BUG_ON(!ch);
-
- ioctx = NULL;
- spin_lock_irqsave(&ch->spinlock, flags);
- if (!list_empty(&ch->free_list)) {
- ioctx = list_first_entry(&ch->free_list,
- struct srpt_send_ioctx, free_list);
- list_del(&ioctx->free_list);
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- if (!ioctx)
- return ioctx;
-
- BUG_ON(ioctx->ch != ch);
- kref_init(&ioctx->kref);
- spin_lock_init(&ioctx->spinlock);
- ioctx->state = SRPT_STATE_NEW;
- ioctx->n_rbuf = 0;
- ioctx->rbufs = NULL;
- ioctx->n_rdma = 0;
- ioctx->n_rdma_ius = 0;
- ioctx->rdma_ius = NULL;
- ioctx->mapped_sg_count = 0;
- init_completion(&ioctx->tx_done);
- ioctx->queue_status_only = false;
- /*
- * transport_init_se_cmd() does not initialize all fields, so do it
- * here.
- */
- memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
- memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
-
- return ioctx;
-}
-
-/**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
-
- BUG_ON(!ioctx);
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- transport_generic_free_cmd(&ioctx->cmd, 0);
-
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
- }
-
- spin_lock_irqsave(&ch->spinlock, flags);
- list_add(&ioctx->free_list, &ch->free_list);
- spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
- srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
-/**
- * srpt_abort_cmd() - Abort a SCSI command.
- * @ioctx: I/O context associated with the SCSI command.
- */
-static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- /*
- * If the command is in a state where the target core is waiting for
- * the ib_srpt driver, change the state to the next state. Changing
-	 * SRPT_STATE_DATA_IN ensures that srpt_queue_response() will call this
- * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
- * function a second time.
- */
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- switch (state) {
- case SRPT_STATE_NEED_DATA:
- ioctx->state = SRPT_STATE_DATA_IN;
- break;
- case SRPT_STATE_DATA_IN:
- case SRPT_STATE_CMD_RSP_SENT:
- case SRPT_STATE_MGMT_RSP_SENT:
- ioctx->state = SRPT_STATE_DONE;
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
-
- if (state == SRPT_STATE_DONE)
- goto out;
-
- pr_debug("Aborting cmd with state %d and tag %lld\n", state,
- ioctx->tag);
-
- switch (state) {
- case SRPT_STATE_NEW:
- case SRPT_STATE_DATA_IN:
- case SRPT_STATE_MGMT:
- /*
- * Do nothing - defer abort processing until
- * srpt_queue_response() is invoked.
- */
- WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
- break;
- case SRPT_STATE_NEED_DATA:
- /* DMA_TO_DEVICE (write) - RDMA read error. */
- atomic_set(&ioctx->cmd.transport_lun_stop, 1);
- transport_generic_handle_data(&ioctx->cmd);
- break;
- case SRPT_STATE_CMD_RSP_SENT:
- /*
- * SRP_RSP sending failed or the SRP_RSP send completion has
- * not been received in time.
- */
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- atomic_set(&ioctx->cmd.transport_lun_stop, 1);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- break;
- case SRPT_STATE_MGMT_RSP_SENT:
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- break;
- default:
-		WARN(true, "Unexpected command state %d\n", state);
- break;
- }
-
-out:
- return state;
-}
-
-/**
- * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
- */
-static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
-{
- struct srpt_send_ioctx *ioctx;
- enum srpt_command_state state;
- struct se_cmd *cmd;
- u32 index;
-
- atomic_inc(&ch->sq_wr_avail);
-
- index = idx_from_wr_id(wr_id);
- ioctx = ch->ioctx_ring[index];
- state = srpt_get_cmd_state(ioctx);
- cmd = &ioctx->cmd;
-
- WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
- && state != SRPT_STATE_MGMT_RSP_SENT
- && state != SRPT_STATE_NEED_DATA
- && state != SRPT_STATE_DONE);
-
- /* If SRP_RSP sending failed, undo the ch->req_lim change. */
- if (state == SRPT_STATE_CMD_RSP_SENT
- || state == SRPT_STATE_MGMT_RSP_SENT)
- atomic_dec(&ch->req_lim);
-
- srpt_abort_cmd(ioctx);
-}
-
-/**
- * srpt_handle_send_comp() - Process an IB send completion notification.
- */
-static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
-
- atomic_inc(&ch->sq_wr_avail);
-
- state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-
- if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
- && state != SRPT_STATE_MGMT_RSP_SENT
- && state != SRPT_STATE_DONE))
- pr_debug("state = %d\n", state);
-
- if (state != SRPT_STATE_DONE)
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- else
- printk(KERN_ERR "IB completion has been received too late for"
- " wr_id = %u.\n", ioctx->ioctx.index);
-}
-
-/**
- * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
- *
- * Note: transport_generic_handle_data() is asynchronous so unmapping the
- * data that has been transferred via IB RDMA must be postponed until the
- * check_stop_free() callback.
- */
-static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx,
- enum srpt_opcode opcode)
-{
- WARN_ON(ioctx->n_rdma <= 0);
- atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
-
- if (opcode == SRPT_RDMA_READ_LAST) {
- if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
- SRPT_STATE_DATA_IN))
- transport_generic_handle_data(&ioctx->cmd);
- else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
- __LINE__, srpt_get_cmd_state(ioctx));
- } else if (opcode == SRPT_RDMA_ABORT) {
- ioctx->rdma_aborted = true;
- } else {
- WARN(true, "unexpected opcode %d\n", opcode);
- }
-}
-
-/**
- * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
- */
-static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx,
- enum srpt_opcode opcode)
-{
- struct se_cmd *cmd;
- enum srpt_command_state state;
-
- cmd = &ioctx->cmd;
- state = srpt_get_cmd_state(ioctx);
- switch (opcode) {
- case SRPT_RDMA_READ_LAST:
- if (ioctx->n_rdma <= 0) {
- printk(KERN_ERR "Received invalid RDMA read"
- " error completion with idx %d\n",
- ioctx->ioctx.index);
- break;
- }
- atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
- if (state == SRPT_STATE_NEED_DATA)
- srpt_abort_cmd(ioctx);
- else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n",
- __func__, __LINE__, state);
- break;
- case SRPT_RDMA_WRITE_LAST:
- atomic_set(&ioctx->cmd.transport_lun_stop, 1);
- break;
- default:
- printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
- __LINE__, opcode);
- break;
- }
-}
-
-/**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
- * @ch: RDMA channel through which the request has been received.
- * @ioctx: I/O context associated with the SRP_CMD request. The response will
- * be built in the buffer ioctx->buf points at and hence this function will
- * overwrite the request data.
- * @tag: tag of the request for which this response is being generated.
- * @status: value for the STATUS field of the SRP_RSP information unit.
- *
- * Returns the size in bytes of the SRP_RSP response.
- *
- * An SRP_RSP response contains a SCSI status or service response. See also
- * section 6.9 in the SRP r16a document for the format of an SRP_RSP
- * response. See also SPC-2 for more information about sense data.
- */
-static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx, u64 tag,
- int status)
-{
- struct srp_rsp *srp_rsp;
- const u8 *sense_data;
- int sense_data_len, max_sense_len;
-
- /*
- * The lowest bit of all SAM-3 status codes is zero (see also
- * paragraph 5.3 in SAM-3).
- */
- WARN_ON(status & 1);
-
- srp_rsp = ioctx->ioctx.buf;
- BUG_ON(!srp_rsp);
-
- sense_data = ioctx->sense_data;
- sense_data_len = ioctx->cmd.scsi_sense_length;
- WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
-
- memset(srp_rsp, 0, sizeof *srp_rsp);
- srp_rsp->opcode = SRP_RSP;
- srp_rsp->req_lim_delta =
- __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
- srp_rsp->tag = tag;
- srp_rsp->status = status;
-
- if (sense_data_len) {
- BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
- max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
- if (sense_data_len > max_sense_len) {
- printk(KERN_WARNING "truncated sense data from %d to %d"
- " bytes\n", sense_data_len, max_sense_len);
- sense_data_len = max_sense_len;
- }
-
- srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
- srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
- memcpy(srp_rsp + 1, sense_data, sense_data_len);
- }
-
- return sizeof(*srp_rsp) + sense_data_len;
-}
-
-/**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
- * @ch: RDMA channel through which the request has been received.
- * @ioctx: I/O context in which the SRP_RSP response will be built.
- * @rsp_code: RSP_CODE that will be stored in the response.
- * @tag: Tag of the request for which this response is being generated.
- *
- * Returns the size in bytes of the SRP_RSP response.
- *
- * An SRP_RSP response contains a SCSI status or service response. See also
- * section 6.9 in the SRP r16a document for the format of an SRP_RSP
- * response.
- */
-static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx,
- u8 rsp_code, u64 tag)
-{
- struct srp_rsp *srp_rsp;
- int resp_data_len;
- int resp_len;
-
- resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
- resp_len = sizeof(*srp_rsp) + resp_data_len;
-
- srp_rsp = ioctx->ioctx.buf;
- BUG_ON(!srp_rsp);
- memset(srp_rsp, 0, sizeof *srp_rsp);
-
- srp_rsp->opcode = SRP_RSP;
- srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
- + atomic_xchg(&ch->req_lim_delta, 0));
- srp_rsp->tag = tag;
-
- if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
- srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
- srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
- srp_rsp->data[3] = rsp_code;
- }
-
- return resp_len;
-}
-
-#define NO_SUCH_LUN ((uint64_t)-1LL)
-
-/*
- * SCSI LUN addressing method. See also SAM-2 and the section about
- * eight byte LUNs.
- */
-enum scsi_lun_addr_method {
- SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
- SCSI_LUN_ADDR_METHOD_FLAT = 1,
- SCSI_LUN_ADDR_METHOD_LUN = 2,
- SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
-};
-
-/*
- * srpt_unpack_lun() - Convert from network LUN to linear LUN.
- *
- * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
- * order (big endian) to a linear LUN. Supports three LUN addressing methods:
- * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
- */
-static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
-{
- uint64_t res = NO_SUCH_LUN;
- int addressing_method;
-
- if (unlikely(len < 2)) {
- printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
-		       "more\n", len);
- goto out;
- }
-
- switch (len) {
- case 8:
- if ((*((__be64 *)lun) &
- __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
- goto out_err;
- break;
- case 4:
- if (*((__be16 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 6:
- if (*((__be32 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 2:
- break;
- default:
- goto out_err;
- }
-
- addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
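-	/*
-	 * Example: the two-byte LUN 0x00 0x01 uses peripheral addressing
-	 * (method 0) and is unpacked to LUN 1 below.
-	 */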
- switch (addressing_method) {
- case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
- case SCSI_LUN_ADDR_METHOD_FLAT:
- case SCSI_LUN_ADDR_METHOD_LUN:
- res = *(lun + 1) | (((*lun) & 0x3f) << 8);
- break;
-
- case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
- default:
-		printk(KERN_ERR "Unimplemented LUN addressing method %u\n",
- addressing_method);
- break;
- }
-
-out:
- return res;
-
-out_err:
- printk(KERN_ERR "Support for multi-level LUNs has not yet been"
-	       " implemented\n");
- goto out;
-}
-
-static int srpt_check_stop_free(struct se_cmd *cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
-}
-
-/**
- * srpt_handle_cmd() - Process SRP_CMD.
- */
-static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
-{
- struct se_cmd *cmd;
- struct srp_cmd *srp_cmd;
- uint64_t unpacked_lun;
- u64 data_len;
- enum dma_data_direction dir;
- int ret;
-
- BUG_ON(!send_ioctx);
-
- srp_cmd = recv_ioctx->ioctx.buf;
- kref_get(&send_ioctx->kref);
- cmd = &send_ioctx->cmd;
- send_ioctx->tag = srp_cmd->tag;
-
- switch (srp_cmd->task_attr) {
- case SRP_CMD_SIMPLE_Q:
- cmd->sam_task_attr = MSG_SIMPLE_TAG;
- break;
- case SRP_CMD_ORDERED_Q:
- default:
- cmd->sam_task_attr = MSG_ORDERED_TAG;
- break;
- case SRP_CMD_HEAD_OF_Q:
- cmd->sam_task_attr = MSG_HEAD_TAG;
- break;
- case SRP_CMD_ACA:
- cmd->sam_task_attr = MSG_ACA_TAG;
- break;
- }
-
- ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
- if (ret) {
- printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
- srp_cmd->tag);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- goto send_sense;
- }
-
- cmd->data_length = data_len;
- cmd->data_direction = dir;
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
- sizeof(srp_cmd->lun));
- if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
- goto send_sense;
- ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
- if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- srpt_queue_status(cmd);
- else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
- goto send_sense;
- else
- WARN_ON_ONCE(ret);
-
- transport_handle_cdb_direct(cmd);
- return 0;
-
-send_sense:
- transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
- 0);
- return -1;
-}
-
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
-
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
-}
-
-static int srp_tmr_to_tcm(int fn)
-{
- switch (fn) {
- case SRP_TSK_ABORT_TASK:
- return TMR_ABORT_TASK;
- case SRP_TSK_ABORT_TASK_SET:
- return TMR_ABORT_TASK_SET;
- case SRP_TSK_CLEAR_TASK_SET:
- return TMR_CLEAR_TASK_SET;
- case SRP_TSK_LUN_RESET:
- return TMR_LUN_RESET;
- case SRP_TSK_CLEAR_ACA:
- return TMR_CLEAR_ACA;
- default:
- return -1;
- }
-}
-
-/**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
- *
- * For more information about SRP_TSK_MGMT information units, see also section
- * 6.7 in the SRP r16a document.
- */
-static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
-{
- struct srp_tsk_mgmt *srp_tsk;
- struct se_cmd *cmd;
- uint64_t unpacked_lun;
- int tcm_tmr;
- int res;
-
- BUG_ON(!send_ioctx);
-
- srp_tsk = recv_ioctx->ioctx.buf;
- cmd = &send_ioctx->cmd;
-
- pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
- " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
- srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
-
- srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
- send_ioctx->tag = srp_tsk->tag;
- tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto process_tmr;
- }
- cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
- if (!cmd->se_tmr_req) {
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
- goto process_tmr;
- }
-
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
- sizeof(srp_tsk->lun));
- res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
- if (res) {
- pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
- send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
- goto process_tmr;
- }
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
- srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
- kref_get(&send_ioctx->kref);
- if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
- transport_generic_handle_tmr(&send_ioctx->cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
-
-}
-
-/**
- * srpt_handle_new_iu() - Process a newly received information unit.
- * @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
- */
-static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
-{
- struct srp_cmd *srp_cmd;
- enum rdma_ch_state ch_state;
-
- BUG_ON(!ch);
- BUG_ON(!recv_ioctx);
-
- ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
- recv_ioctx->ioctx.dma, srp_max_req_size,
- DMA_FROM_DEVICE);
-
- ch_state = srpt_get_ch_state(ch);
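-	/*
-	 * Information units that arrive before the channel has reached the
-	 * CH_LIVE state are queued on cmd_wait_list and are processed from
-	 * srpt_cm_rtu_recv() once the connection has been established.
-	 */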
- if (unlikely(ch_state == CH_CONNECTING)) {
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
- goto out;
- }
-
- if (unlikely(ch_state != CH_LIVE))
- goto out;
-
- srp_cmd = recv_ioctx->ioctx.buf;
- if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx)
- send_ioctx = srpt_get_send_ioctx(ch);
- if (unlikely(!send_ioctx)) {
- list_add_tail(&recv_ioctx->wait_list,
- &ch->cmd_wait_list);
- goto out;
- }
- }
-
- transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG,
- send_ioctx->sense_data);
-
- switch (srp_cmd->opcode) {
- case SRP_CMD:
- srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
- break;
- case SRP_TSK_MGMT:
- srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
- break;
- case SRP_I_LOGOUT:
- printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
- break;
- case SRP_CRED_RSP:
- pr_debug("received SRP_CRED_RSP\n");
- break;
- case SRP_AER_RSP:
- pr_debug("received SRP_AER_RSP\n");
- break;
- case SRP_RSP:
- printk(KERN_ERR "Received SRP_RSP\n");
- break;
- default:
- printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
- srp_cmd->opcode);
- break;
- }
-
- srpt_post_recv(ch->sport->sdev, recv_ioctx);
-out:
- return;
-}
-
-static void srpt_process_rcv_completion(struct ib_cq *cq,
- struct srpt_rdma_ch *ch,
- struct ib_wc *wc)
-{
- struct srpt_device *sdev = ch->sport->sdev;
- struct srpt_recv_ioctx *ioctx;
- u32 index;
-
- index = idx_from_wr_id(wc->wr_id);
- if (wc->status == IB_WC_SUCCESS) {
- int req_lim;
-
- req_lim = atomic_dec_return(&ch->req_lim);
- if (unlikely(req_lim < 0))
- printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
- ioctx = sdev->ioctx_ring[index];
- srpt_handle_new_iu(ch, ioctx, NULL);
- } else {
- printk(KERN_INFO "receiving failed for idx %u with status %d\n",
- index, wc->status);
- }
-}
-
-/**
- * srpt_process_send_completion() - Process an IB send completion.
- *
- * Note: Although this has not yet been observed during tests, at least in
- * theory it is possible that the srpt_get_send_ioctx() call invoked by
- * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
- * value in each response is set to one, and it is possible that this response
- * makes the initiator send a new request before the send completion for that
- * response has been processed. This could e.g. happen if the call to
- * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
- * if IB retransmission causes generation of the send completion to be
- * delayed. Incoming information units for which srpt_get_send_ioctx() fails
- * are queued on cmd_wait_list. The code below processes these delayed
- * requests one at a time.
- */
-static void srpt_process_send_completion(struct ib_cq *cq,
- struct srpt_rdma_ch *ch,
- struct ib_wc *wc)
-{
- struct srpt_send_ioctx *send_ioctx;
- uint32_t index;
- enum srpt_opcode opcode;
-
- index = idx_from_wr_id(wc->wr_id);
- opcode = opcode_from_wr_id(wc->wr_id);
- send_ioctx = ch->ioctx_ring[index];
- if (wc->status == IB_WC_SUCCESS) {
- if (opcode == SRPT_SEND)
- srpt_handle_send_comp(ch, send_ioctx);
- else {
- WARN_ON(opcode != SRPT_RDMA_ABORT &&
- wc->opcode != IB_WC_RDMA_READ);
- srpt_handle_rdma_comp(ch, send_ioctx, opcode);
- }
- } else {
- if (opcode == SRPT_SEND) {
- printk(KERN_INFO "sending response for idx %u failed"
- " with status %d\n", index, wc->status);
- srpt_handle_send_err_comp(ch, wc->wr_id);
- } else if (opcode != SRPT_RDMA_MID) {
-			printk(KERN_INFO "RDMA opcode %d for idx %u failed with"
-				" status %d\n", opcode, index, wc->status);
- srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
- }
- }
-
- while (unlikely(opcode == SRPT_SEND
- && !list_empty(&ch->cmd_wait_list)
- && srpt_get_ch_state(ch) == CH_LIVE
- && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
- struct srpt_recv_ioctx *recv_ioctx;
-
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
- }
-}
-
-static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
-{
- struct ib_wc *const wc = ch->wc;
- int i, n;
-
- WARN_ON(cq != ch->cq);
-
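-	/*
-	 * Re-arm the CQ before draining it so that completions that arrive
-	 * while polling trigger another wakeup instead of being missed.
-	 */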
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
- for (i = 0; i < n; i++) {
- if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
- srpt_process_rcv_completion(cq, ch, &wc[i]);
- else
- srpt_process_send_completion(cq, ch, &wc[i]);
- }
- }
-}
-
-/**
- * srpt_completion() - IB completion queue callback function.
- *
- * Notes:
- * - It is guaranteed that a completion handler will never be invoked
- * concurrently on two different CPUs for the same completion queue. See also
- * Documentation/infiniband/core_locking.txt and the implementation of
- * handle_edge_irq() in kernel/irq/chip.c.
- * - When threaded IRQs are enabled, completion handlers are invoked in thread
- * context instead of interrupt context.
- */
-static void srpt_completion(struct ib_cq *cq, void *ctx)
-{
- struct srpt_rdma_ch *ch = ctx;
-
- wake_up_interruptible(&ch->wait_queue);
-}
-
-static int srpt_compl_thread(void *arg)
-{
- struct srpt_rdma_ch *ch;
-
- /* Hibernation / freezing of the SRPT kernel thread is not supported. */
- current->flags |= PF_NOFREEZE;
-
- ch = arg;
- BUG_ON(!ch);
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
- ch->sess_name, ch->thread->comm, current->pid);
- while (!kthread_should_stop()) {
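-		/*
-		 * The comma expression below polls the completion queue on
-		 * every wakeup; the wait only finishes once
-		 * kthread_should_stop() returns true.
-		 */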
- wait_event_interruptible(ch->wait_queue,
- (srpt_process_completion(ch->cq, ch),
- kthread_should_stop()));
- }
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
- ch->sess_name, ch->thread->comm, current->pid);
- return 0;
-}
-
-/**
- * srpt_create_ch_ib() - Create receive and send completion queues.
- */
-static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
-{
- struct ib_qp_init_attr *qp_init;
- struct srpt_port *sport = ch->sport;
- struct srpt_device *sdev = sport->sdev;
- u32 srp_sq_size = sport->port_attrib.srp_sq_size;
- int ret;
-
- WARN_ON(ch->rq_size < 1);
-
- ret = -ENOMEM;
- qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
- if (!qp_init)
- goto out;
-
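-	/*
-	 * A single CQ is used for both receive and send completions, hence
-	 * its size is the sum of the receive and send queue sizes.
-	 */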
- ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
- ch->rq_size + srp_sq_size, 0);
- if (IS_ERR(ch->cq)) {
- ret = PTR_ERR(ch->cq);
- printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + srp_sq_size, ret);
- goto out;
- }
-
- qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
- qp_init->send_cq = ch->cq;
- qp_init->recv_cq = ch->cq;
- qp_init->srq = sdev->srq;
- qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
- qp_init->qp_type = IB_QPT_RC;
- qp_init->cap.max_send_wr = srp_sq_size;
- qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
-
- ch->qp = ib_create_qp(sdev->pd, qp_init);
- if (IS_ERR(ch->qp)) {
- ret = PTR_ERR(ch->qp);
- printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
- goto err_destroy_cq;
- }
-
- atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
-
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
- __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
- qp_init->cap.max_send_wr, ch->cm_id);
-
- ret = srpt_init_ch_qp(ch, ch->qp);
- if (ret)
- goto err_destroy_qp;
-
- init_waitqueue_head(&ch->wait_queue);
-
- pr_debug("creating thread for session %s\n", ch->sess_name);
-
- ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
- if (IS_ERR(ch->thread)) {
- printk(KERN_ERR "failed to create kernel thread %ld\n",
- PTR_ERR(ch->thread));
- ch->thread = NULL;
- goto err_destroy_qp;
- }
-
-out:
- kfree(qp_init);
- return ret;
-
-err_destroy_qp:
- ib_destroy_qp(ch->qp);
-err_destroy_cq:
- ib_destroy_cq(ch->cq);
- goto out;
-}
-
-static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
-{
- if (ch->thread)
- kthread_stop(ch->thread);
-
- ib_destroy_qp(ch->qp);
- ib_destroy_cq(ch->cq);
-}
-
-/**
- * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
- *
- * Reset the QP and make sure all resources associated with the channel will
- * be deallocated at an appropriate time.
- *
- * Note: The caller must hold ch->sport->sdev->spinlock.
- */
-static void __srpt_close_ch(struct srpt_rdma_ch *ch)
-{
- struct srpt_device *sdev;
- enum rdma_ch_state prev_state;
- unsigned long flags;
-
- sdev = ch->sport->sdev;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev_state = ch->state;
- switch (prev_state) {
- case CH_CONNECTING:
- case CH_LIVE:
- ch->state = CH_DISCONNECTING;
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- switch (prev_state) {
- case CH_CONNECTING:
- ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
- NULL, 0);
- /* fall through */
- case CH_LIVE:
- if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "sending CM DREQ failed.\n");
- break;
- case CH_DISCONNECTING:
- break;
- case CH_DRAINING:
- case CH_RELEASING:
- break;
- }
-}
-
-/**
- * srpt_close_ch() - Close an RDMA channel.
- */
-static void srpt_close_ch(struct srpt_rdma_ch *ch)
-{
- struct srpt_device *sdev;
-
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
-}
-
-/**
- * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
- * @cm_id: Pointer to the CM ID of the channel to be drained.
- *
- * Note: Must be called from inside srpt_cm_handler to avoid a race between
- * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
- * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
- * waits until all target sessions for the associated IB device have been
- * unregistered and target session unregistration involves a call to
- * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
- * this function has finished).
- */
-static void srpt_drain_channel(struct ib_cm_id *cm_id)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- int ret;
- bool do_reset = false;
-
- WARN_ON_ONCE(irqs_disabled());
-
- sdev = cm_id->context;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- do_reset = srpt_test_and_set_ch_state(ch,
- CH_CONNECTING, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_LIVE, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_DISCONNECTING, CH_DRAINING);
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
-
- if (do_reset) {
- ret = srpt_ch_qp_err(ch);
- if (ret < 0)
- printk(KERN_ERR "Setting queue pair in error state"
- " failed: %d\n", ret);
- }
-}
-
-/**
- * srpt_find_channel() - Look up an RDMA channel.
- * @cm_id: Pointer to the CM ID of the channel to be looked up.
- *
- * Return NULL if no matching RDMA channel has been found.
- */
-static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
- struct ib_cm_id *cm_id)
-{
- struct srpt_rdma_ch *ch;
- bool found;
-
- WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
-
- found = false;
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- found = true;
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
-
- return found ? ch : NULL;
-}
-
-/**
- * srpt_release_channel() - Release channel resources.
- *
- * Schedules the actual release because:
- * - Calling ib_destroy_cm_id() from inside an IB CM callback would
- * trigger a deadlock.
- * - It is not safe to call TCM transport_* functions from interrupt context.
- */
-static void srpt_release_channel(struct srpt_rdma_ch *ch)
-{
- schedule_work(&ch->release_work);
-}
-
-static void srpt_release_channel_work(struct work_struct *w)
-{
- struct srpt_rdma_ch *ch;
- struct srpt_device *sdev;
-
- ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
- ch->release_done);
-
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
-
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
- srpt_destroy_ch_ib(ch);
-
- srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
- ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
-
- spin_lock_irq(&sdev->spinlock);
- list_del(&ch->list);
- spin_unlock_irq(&sdev->spinlock);
-
- ib_destroy_cm_id(ch->cm_id);
-
- if (ch->release_done)
- complete(ch->release_done);
-
- wake_up(&sdev->ch_releaseQ);
-
- kfree(ch);
-}
-
-static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
- u8 i_port_id[16])
-{
- struct srpt_node_acl *nacl;
-
- list_for_each_entry(nacl, &sport->port_acl_list, list)
- if (memcmp(nacl->i_port_id, i_port_id,
- sizeof(nacl->i_port_id)) == 0)
- return nacl;
-
- return NULL;
-}
-
-static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
- u8 i_port_id[16])
-{
- struct srpt_node_acl *nacl;
-
- spin_lock_irq(&sport->port_acl_lock);
- nacl = __srpt_lookup_acl(sport, i_port_id);
- spin_unlock_irq(&sport->port_acl_lock);
-
- return nacl;
-}
-
-/**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
- *
- * Ownership of the cm_id is transferred to the target session if this
- * function returns zero. Otherwise the caller remains the owner of cm_id.
- */
-static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
- void *private_data)
-{
- struct srpt_device *sdev = cm_id->context;
- struct srpt_port *sport = &sdev->port[param->port - 1];
- struct srp_login_req *req;
- struct srp_login_rsp *rsp;
- struct srp_login_rej *rej;
- struct ib_cm_rep_param *rep_param;
- struct srpt_rdma_ch *ch, *tmp_ch;
- struct srpt_node_acl *nacl;
- u32 it_iu_len;
- int i;
- int ret = 0;
-
- WARN_ON_ONCE(irqs_disabled());
-
- if (WARN_ON(!sdev || !private_data))
- return -EINVAL;
-
- req = (struct srp_login_req *)private_data;
-
- it_iu_len = be32_to_cpu(req->req_it_iu_len);
-
- printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
-
- rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
- rej = kzalloc(sizeof *rej, GFP_KERNEL);
- rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
-
- if (!rsp || !rej || !rep_param) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
- ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
- " length (%d bytes) is out of range (%d .. %d)\n",
- it_iu_len, 64, srp_max_req_size);
- goto reject;
- }
-
- if (!sport->enabled) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
- " has not yet been enabled\n");
- goto reject;
- }
-
- if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
- spin_lock_irq(&sdev->spinlock);
-
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
- if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
- && !memcmp(ch->t_port_id, req->target_port_id, 16)
- && param->port == ch->sport->port
- && param->listen_id == ch->sport->sdev->cm_id
- && ch->cm_id) {
- enum rdma_ch_state ch_state;
-
- ch_state = srpt_get_ch_state(ch);
- if (ch_state != CH_CONNECTING
- && ch_state != CH_LIVE)
- continue;
-
- /* found an existing channel */
- pr_debug("Found existing channel %s"
- " cm_id= %p state= %d\n",
- ch->sess_name, ch->cm_id, ch_state);
-
- __srpt_close_ch(ch);
-
- rsp->rsp_flags =
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
- }
- }
-
- spin_unlock_irq(&sdev->spinlock);
-
- } else
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-
- if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
- || *(__be64 *)(req->target_port_id + 8) !=
- cpu_to_be64(srpt_service_guid)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
- ret = -ENOMEM;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
- " has an invalid target port identifier.\n");
- goto reject;
- }
-
- ch = kzalloc(sizeof *ch, GFP_KERNEL);
- if (!ch) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
- ret = -ENOMEM;
- goto reject;
- }
-
- INIT_WORK(&ch->release_work, srpt_release_channel_work);
- memcpy(ch->i_port_id, req->initiator_port_id, 16);
- memcpy(ch->t_port_id, req->target_port_id, 16);
- ch->sport = &sdev->port[param->port - 1];
- ch->cm_id = cm_id;
- /*
- * Avoid QUEUE_FULL conditions by limiting the number of buffers used
- * for the SRP protocol to the command queue size.
- */
- ch->rq_size = SRPT_RQ_SIZE;
- spin_lock_init(&ch->spinlock);
- ch->state = CH_CONNECTING;
- INIT_LIST_HEAD(&ch->cmd_wait_list);
- ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
-
- ch->ioctx_ring = (struct srpt_send_ioctx **)
- srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
- sizeof(*ch->ioctx_ring[0]),
- ch->rsp_size, DMA_TO_DEVICE);
- if (!ch->ioctx_ring)
- goto free_ch;
-
- INIT_LIST_HEAD(&ch->free_list);
- for (i = 0; i < ch->rq_size; i++) {
- ch->ioctx_ring[i]->ch = ch;
- list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
- }
-
- ret = srpt_create_ch_ib(ch);
- if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
- " a new RDMA channel failed.\n");
- goto free_ring;
- }
-
- ret = srpt_ch_qp_rtr(ch, ch->qp);
- if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
- " RTR failed (error code = %d)\n", ret);
- goto destroy_ib;
- }
- /*
-	 * Use the initiator port identifier as the session name.
- */
- snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
- be64_to_cpu(*(__be64 *)ch->i_port_id),
- be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
-
- pr_debug("registering session %s\n", ch->sess_name);
-
- nacl = srpt_lookup_acl(sport, ch->i_port_id);
- if (!nacl) {
- printk(KERN_INFO "Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
- goto destroy_ib;
- }
-
- ch->sess = transport_init_session();
- if (!ch->sess) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_debug("Failed to create session\n");
- goto deregister_session;
- }
- ch->sess->se_node_acl = &nacl->nacl;
- transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
-
- pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
- ch->sess_name, ch->cm_id);
-
- /* create srp_login_response */
- rsp->opcode = SRP_LOGIN_RSP;
- rsp->tag = req->tag;
- rsp->max_it_iu_len = req->req_it_iu_len;
- rsp->max_ti_iu_len = req->req_it_iu_len;
- ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
- rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
- atomic_set(&ch->req_lim, ch->rq_size);
- atomic_set(&ch->req_lim_delta, 0);
-
- /* create cm reply */
- rep_param->qp_num = ch->qp->qp_num;
- rep_param->private_data = (void *)rsp;
- rep_param->private_data_len = sizeof *rsp;
- rep_param->rnr_retry_count = 7;
- rep_param->flow_control = 1;
- rep_param->failover_accepted = 0;
- rep_param->srq = 1;
- rep_param->responder_resources = 4;
- rep_param->initiator_depth = 4;
-
- ret = ib_send_cm_rep(cm_id, rep_param);
- if (ret) {
- printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
- " (error code = %d)\n", ret);
- goto release_channel;
- }
-
- spin_lock_irq(&sdev->spinlock);
- list_add_tail(&ch->list, &sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
-
- goto out;
-
-release_channel:
- srpt_set_ch_state(ch, CH_RELEASING);
- transport_deregister_session_configfs(ch->sess);
-
-deregister_session:
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
-destroy_ib:
- srpt_destroy_ch_ib(ch);
-
-free_ring:
- srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
- ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
-free_ch:
- kfree(ch);
-
-reject:
- rej->opcode = SRP_LOGIN_REJ;
- rej->tag = req->tag;
- rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
-
- ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- (void *)rej, sizeof *rej);
-
-out:
- kfree(rep_param);
- kfree(rsp);
- kfree(rej);
-
- return ret;
-}
-
-static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
-{
- printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
- *
- * An IB_CM_RTU_RECEIVED message indicates that the connection is established
- * and that the recipient may begin transmitting (RTU = ready to use).
- */
-static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
-{
- struct srpt_rdma_ch *ch;
- int ret;
-
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
- struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
-
- ret = srpt_ch_qp_rts(ch, ch->qp);
-
- list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
- wait_list) {
- list_del(&ioctx->wait_list);
- srpt_handle_new_iu(ch, ioctx, NULL);
- }
- if (ret)
- srpt_close_ch(ch);
- }
-}
-
-static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
-{
- printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
-{
- printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_dreq_recv() - Process reception of a DREQ message.
- */
-static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
- bool send_drep = false;
-
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
-
- spin_lock_irqsave(&ch->spinlock, flags);
- switch (ch->state) {
- case CH_CONNECTING:
- case CH_LIVE:
- send_drep = true;
- ch->state = CH_DISCONNECTING;
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- WARN(true, "unexpected channel state %d\n", ch->state);
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- if (send_drep) {
- if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "Sending IB DREP failed.\n");
- printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
- ch->sess_name);
- }
-}
-
-/**
- * srpt_cm_drep_recv() - Process reception of a DREP message.
- */
-static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
-{
- printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
- cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_handler() - IB connection manager callback function.
- *
- * A non-zero return value will cause the caller to destroy the CM ID.
- *
- * Note: srpt_cm_handler() must only return a non-zero value when the transfer
- * of cm_id ownership to a channel by srpt_cm_req_recv() has failed. Returning
- * a non-zero value in any other case will trigger a race with the
- * ib_destroy_cm_id() call in srpt_release_channel().
- */
-static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
-{
- int ret;
-
- ret = 0;
- switch (event->event) {
- case IB_CM_REQ_RECEIVED:
- ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
- event->private_data);
- break;
- case IB_CM_REJ_RECEIVED:
- srpt_cm_rej_recv(cm_id);
- break;
- case IB_CM_RTU_RECEIVED:
- case IB_CM_USER_ESTABLISHED:
- srpt_cm_rtu_recv(cm_id);
- break;
- case IB_CM_DREQ_RECEIVED:
- srpt_cm_dreq_recv(cm_id);
- break;
- case IB_CM_DREP_RECEIVED:
- srpt_cm_drep_recv(cm_id);
- break;
- case IB_CM_TIMEWAIT_EXIT:
- srpt_cm_timewait_exit(cm_id);
- break;
- case IB_CM_REP_ERROR:
- srpt_cm_rep_error(cm_id);
- break;
- case IB_CM_DREQ_ERROR:
- printk(KERN_INFO "Received IB DREQ ERROR event.\n");
- break;
- case IB_CM_MRA_RECEIVED:
- printk(KERN_INFO "Received IB MRA event\n");
- break;
- default:
- printk(KERN_ERR "received unrecognized IB CM event %d\n",
- event->event);
- break;
- }
-
- return ret;
-}
-
-/**
- * srpt_perform_rdmas() - Perform IB RDMA.
- *
- * Returns zero upon success or a negative number upon failure.
- */
-static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct ib_send_wr wr;
- struct ib_send_wr *bad_wr;
- struct rdma_iu *riu;
- int i;
- int ret;
- int sq_wr_avail;
- enum dma_data_direction dir;
- const int n_rdma = ioctx->n_rdma;
-
- dir = ioctx->cmd.data_direction;
- if (dir == DMA_TO_DEVICE) {
- /* write */
- ret = -ENOMEM;
- sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
- if (sq_wr_avail < 0) {
- printk(KERN_WARNING "IB send queue full (needed %d)\n",
- n_rdma);
- goto out;
- }
- }
-
- ioctx->rdma_aborted = false;
- ret = 0;
- riu = ioctx->rdma_ius;
- memset(&wr, 0, sizeof wr);
-
- for (i = 0; i < n_rdma; ++i, ++riu) {
- if (dir == DMA_FROM_DEVICE) {
- wr.opcode = IB_WR_RDMA_WRITE;
- wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
- SRPT_RDMA_WRITE_LAST :
- SRPT_RDMA_MID,
- ioctx->ioctx.index);
- } else {
- wr.opcode = IB_WR_RDMA_READ;
- wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
- SRPT_RDMA_READ_LAST :
- SRPT_RDMA_MID,
- ioctx->ioctx.index);
- }
- wr.next = NULL;
- wr.wr.rdma.remote_addr = riu->raddr;
- wr.wr.rdma.rkey = riu->rkey;
- wr.num_sge = riu->sge_cnt;
- wr.sg_list = riu->sge;
-
-		/* Only request a completion for the last RDMA of a write command. */
- if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(ch->qp, &wr, &bad_wr);
- if (ret)
- break;
- }
-
- if (ret)
- printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
- __func__, __LINE__, ret, i, n_rdma);
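-	/*
-	 * If posting failed after at least one work request had already been
-	 * posted, post a signaled zero-length work request and wait for its
-	 * completion so that all previously posted RDMA work requests have
-	 * completed before returning.
-	 */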
- if (ret && i > 0) {
- wr.num_sge = 0;
- wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
- wr.send_flags = IB_SEND_SIGNALED;
- while (ch->state == CH_LIVE &&
- ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
- printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
- ioctx->ioctx.index);
- msleep(1000);
- }
- while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
- printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
- ioctx->ioctx.index);
- msleep(1000);
- }
- }
-out:
- if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
- atomic_add(n_rdma, &ch->sq_wr_avail);
- return ret;
-}
-
-/**
- * srpt_xfer_data() - Start data transfer from initiator to target.
- */
-static int srpt_xfer_data(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- int ret;
-
- ret = srpt_map_sg_to_ib_sge(ch, ioctx);
- if (ret) {
- printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
- goto out;
- }
-
- ret = srpt_perform_rdmas(ch, ioctx);
- if (ret) {
- if (ret == -EAGAIN || ret == -ENOMEM)
- printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
- __func__, __LINE__, ret);
- else
- printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
- __func__, __LINE__, ret);
- goto out_unmap;
- }
-
-out:
- return ret;
-out_unmap:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- goto out;
-}
-
-static int srpt_write_pending_status(struct se_cmd *se_cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
-}
-
-/*
- * srpt_write_pending() - Start data transfer from initiator to target (write).
- */
-static int srpt_write_pending(struct se_cmd *se_cmd)
-{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
- enum srpt_command_state new_state;
- enum rdma_ch_state ch_state;
- int ret;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
-
- new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
- WARN_ON(new_state == SRPT_STATE_DONE);
-
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- ch_state = srpt_get_ch_state(ch);
- switch (ch_state) {
- case CH_CONNECTING:
- WARN(true, "unexpected channel state %d\n", ch_state);
- ret = -EINVAL;
- goto out;
- case CH_LIVE:
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- pr_debug("cmd with tag %lld: channel disconnecting\n",
- ioctx->tag);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
- ret = -EINVAL;
- goto out;
- }
- ret = srpt_xfer_data(ch, ioctx);
-
-out:
- return ret;
-}
-
-static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
-{
- switch (tcm_mgmt_status) {
- case TMR_FUNCTION_COMPLETE:
- return SRP_TSK_MGMT_SUCCESS;
- case TMR_FUNCTION_REJECTED:
- return SRP_TSK_MGMT_FUNC_NOT_SUPP;
- }
- return SRP_TSK_MGMT_FAILED;
-}
-
-/**
- * srpt_queue_response() - Transmits the response to a SCSI command.
- *
- * Callback function called by the TCM core. Must not block since it can be
- * invoked in the context of the IB completion handler.
- */
-static int srpt_queue_response(struct se_cmd *cmd)
-{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
- enum srpt_command_state state;
- unsigned long flags;
- int ret;
- enum dma_data_direction dir;
- int resp_len;
- u8 srp_tm_status;
-
- ret = 0;
-
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- switch (state) {
- case SRPT_STATE_NEW:
- case SRPT_STATE_DATA_IN:
- ioctx->state = SRPT_STATE_CMD_RSP_SENT;
- break;
- case SRPT_STATE_MGMT:
- ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
- break;
- default:
- WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
- ch, ioctx->ioctx.index, ioctx->state);
- break;
- }
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
-
- if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
- || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
- atomic_inc(&ch->req_lim_delta);
- srpt_abort_cmd(ioctx);
- goto out;
- }
-
- dir = ioctx->cmd.data_direction;
-
- /* For read commands, transfer the data to the initiator. */
- if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
- !ioctx->queue_status_only) {
- ret = srpt_xfer_data(ch, ioctx);
- if (ret) {
- printk(KERN_ERR "xfer_data failed for tag %llu\n",
- ioctx->tag);
- goto out;
- }
- }
-
- if (state != SRPT_STATE_MGMT)
- resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
- cmd->scsi_status);
- else {
- srp_tm_status
- = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
- resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
- ioctx->tag);
- }
- ret = srpt_post_send(ch, ioctx, resp_len);
- if (ret) {
- printk(KERN_ERR "sending cmd response failed for tag %llu\n",
- ioctx->tag);
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
- }
-
-out:
- return ret;
-}
-
-static int srpt_queue_status(struct se_cmd *cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- BUG_ON(ioctx->sense_data != cmd->sense_buffer);
- if (cmd->se_cmd_flags &
- (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
- WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
- ioctx->queue_status_only = true;
- return srpt_queue_response(cmd);
-}
-
-static void srpt_refresh_port_work(struct work_struct *work)
-{
- struct srpt_port *sport = container_of(work, struct srpt_port, work);
-
- srpt_refresh_port(sport);
-}
-
-static int srpt_ch_list_empty(struct srpt_device *sdev)
-{
- int res;
-
- spin_lock_irq(&sdev->spinlock);
- res = list_empty(&sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
-
- return res;
-}
-
-/**
- * srpt_release_sdev() - Free the channel resources associated with a target.
- */
-static int srpt_release_sdev(struct srpt_device *sdev)
-{
- struct srpt_rdma_ch *ch, *tmp_ch;
- int res;
-
- WARN_ON_ONCE(irqs_disabled());
-
- BUG_ON(!sdev);
-
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
-
- res = wait_event_interruptible(sdev->ch_releaseQ,
- srpt_ch_list_empty(sdev));
- if (res)
- printk(KERN_ERR "%s: interrupted.\n", __func__);
-
- return 0;
-}
-
-static struct srpt_port *__srpt_lookup_port(const char *name)
-{
- struct ib_device *dev;
- struct srpt_device *sdev;
- struct srpt_port *sport;
- int i;
-
- list_for_each_entry(sdev, &srpt_dev_list, list) {
- dev = sdev->device;
- if (!dev)
- continue;
-
- for (i = 0; i < dev->phys_port_cnt; i++) {
- sport = &sdev->port[i];
-
- if (!strcmp(sport->port_guid, name))
- return sport;
- }
- }
-
- return NULL;
-}
-
-static struct srpt_port *srpt_lookup_port(const char *name)
-{
- struct srpt_port *sport;
-
- spin_lock(&srpt_dev_lock);
- sport = __srpt_lookup_port(name);
- spin_unlock(&srpt_dev_lock);
-
- return sport;
-}
-
-/**
- * srpt_add_one() - Infiniband device addition callback function.
- */
-static void srpt_add_one(struct ib_device *device)
-{
- struct srpt_device *sdev;
- struct srpt_port *sport;
- struct ib_srq_init_attr srq_attr;
- int i;
-
- pr_debug("device = %p, device->dma_ops = %p\n", device,
- device->dma_ops);
-
- sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
- if (!sdev)
- goto err;
-
- sdev->device = device;
- INIT_LIST_HEAD(&sdev->rch_list);
- init_waitqueue_head(&sdev->ch_releaseQ);
- spin_lock_init(&sdev->spinlock);
-
- if (ib_query_device(device, &sdev->dev_attr))
- goto free_dev;
-
- sdev->pd = ib_alloc_pd(device);
- if (IS_ERR(sdev->pd))
- goto free_dev;
-
- sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(sdev->mr))
- goto err_pd;
-
- sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
-
- srq_attr.event_handler = srpt_srq_event;
- srq_attr.srq_context = (void *)sdev;
- srq_attr.attr.max_wr = sdev->srq_size;
- srq_attr.attr.max_sge = 1;
- srq_attr.attr.srq_limit = 0;
-
- sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
- if (IS_ERR(sdev->srq))
- goto err_mr;
-
- pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
- __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
- device->name);
-
- if (!srpt_service_guid)
- srpt_service_guid = be64_to_cpu(device->node_guid);
-
- sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
- if (IS_ERR(sdev->cm_id))
- goto err_srq;
-
- /* print out target login information */
- pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
- "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
- srpt_service_guid, srpt_service_guid);
-
- /*
- * We do not have a consistent service_id (i.e. also the id_ext of the
- * target_id) to identify this target. We currently use the GUID of the first
- * HCA in the system as the service_id; therefore, the target_id will change
- * if this HCA goes bad and is replaced by a different HCA.
- */
- if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
- goto err_cm;
-
- INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
- srpt_event_handler);
- if (ib_register_event_handler(&sdev->event_handler))
- goto err_cm;
-
- sdev->ioctx_ring = (struct srpt_recv_ioctx **)
- srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
- sizeof(*sdev->ioctx_ring[0]),
- srp_max_req_size, DMA_FROM_DEVICE);
- if (!sdev->ioctx_ring)
- goto err_event;
-
- for (i = 0; i < sdev->srq_size; ++i)
- srpt_post_recv(sdev, sdev->ioctx_ring[i]);
-
- WARN_ON(sdev->device->phys_port_cnt
- > sizeof(sdev->port)/sizeof(sdev->port[0]));
-
- for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
- sport = &sdev->port[i - 1];
- sport->sdev = sdev;
- sport->port = i;
- sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
- sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
- sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
- INIT_WORK(&sport->work, srpt_refresh_port_work);
- INIT_LIST_HEAD(&sport->port_acl_list);
- spin_lock_init(&sport->port_acl_lock);
-
- if (srpt_refresh_port(sport)) {
- printk(KERN_ERR "MAD registration failed for %s-%d.\n",
- srpt_sdev_name(sdev), i);
- goto err_ring;
- }
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "0x%016llx%016llx",
- be64_to_cpu(sport->gid.global.subnet_prefix),
- be64_to_cpu(sport->gid.global.interface_id));
- }
-
- spin_lock(&srpt_dev_lock);
- list_add_tail(&sdev->list, &srpt_dev_list);
- spin_unlock(&srpt_dev_lock);
-
-out:
- ib_set_client_data(device, &srpt_client, sdev);
- pr_debug("added %s.\n", device->name);
- return;
-
-err_ring:
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size,
- DMA_FROM_DEVICE);
-err_event:
- ib_unregister_event_handler(&sdev->event_handler);
-err_cm:
- ib_destroy_cm_id(sdev->cm_id);
-err_srq:
- ib_destroy_srq(sdev->srq);
-err_mr:
- ib_dereg_mr(sdev->mr);
-err_pd:
- ib_dealloc_pd(sdev->pd);
-free_dev:
- kfree(sdev);
-err:
- sdev = NULL;
- printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
- goto out;
-}
-
-/**
- * srpt_remove_one() - InfiniBand device removal callback function.
- */
-static void srpt_remove_one(struct ib_device *device)
-{
- struct srpt_device *sdev;
- int i;
-
- sdev = ib_get_client_data(device, &srpt_client);
- if (!sdev) {
- printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
- device->name);
- return;
- }
-
- srpt_unregister_mad_agent(sdev);
-
- ib_unregister_event_handler(&sdev->event_handler);
-
- /* Cancel any work queued by the just unregistered IB event handler. */
- for (i = 0; i < sdev->device->phys_port_cnt; i++)
- cancel_work_sync(&sdev->port[i].work);
-
- ib_destroy_cm_id(sdev->cm_id);
-
- /*
- * Unregistering a target must happen after destroying sdev->cm_id
- * such that no new SRP_LOGIN_REQ information units can arrive while
- * destroying the target.
- */
- spin_lock(&srpt_dev_lock);
- list_del(&sdev->list);
- spin_unlock(&srpt_dev_lock);
- srpt_release_sdev(sdev);
-
- ib_destroy_srq(sdev->srq);
- ib_dereg_mr(sdev->mr);
- ib_dealloc_pd(sdev->pd);
-
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
- sdev->ioctx_ring = NULL;
- kfree(sdev);
-}
-
-static struct ib_client srpt_client = {
- .name = DRV_NAME,
- .add = srpt_add_one,
- .remove = srpt_remove_one
-};
-
-static int srpt_check_true(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int srpt_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static char *srpt_get_fabric_name(void)
-{
- return "srpt";
-}
-
-static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- return SCSI_TRANSPORTID_PROTOCOLID_SRP;
-}
-
-static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
-{
- struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
-
- return sport->port_guid;
-}
-
-static u16 srpt_get_tag(struct se_portal_group *tpg)
-{
- return 1;
-}
-
-static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code, unsigned char *buf)
-{
- struct srpt_node_acl *nacl;
- struct spc_rdma_transport_id *tr_id;
-
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- tr_id = (void *)buf;
- tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
- memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
- return sizeof(*tr_id);
-}
-
-static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- return sizeof(struct spc_rdma_transport_id);
-}
-
-static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
- const char *buf, u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct spc_rdma_transport_id *tr_id;
-
- *port_nexus_ptr = NULL;
- *out_tid_len = sizeof(struct spc_rdma_transport_id);
- tr_id = (void *)buf;
- return (char *)tr_id->i_port_id;
-}
-
-static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct srpt_node_acl *nacl;
-
- nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
- if (!nacl) {
- printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
- return NULL;
- }
-
- return &nacl->nacl;
-}
-
-static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct srpt_node_acl *nacl;
-
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- kfree(nacl);
-}
-
-static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static void srpt_release_cmd(struct se_cmd *se_cmd)
-{
-}
-
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return true;
-}
-
-/**
- * srpt_close_session() - Forcibly close a session.
- *
- * Callback function invoked by the TCM core to clean up sessions associated
- * with a node ACL when the user invokes
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
- */
-static void srpt_close_session(struct se_session *se_sess)
-{
- DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_rdma_ch *ch;
- struct srpt_device *sdev;
- int res;
-
- ch = se_sess->fabric_sess_ptr;
- WARN_ON(ch->sess != se_sess);
-
- pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
-
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
- BUG_ON(ch->release_done);
- ch->release_done = &release_done;
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
-
- res = wait_for_completion_timeout(&release_done, 60 * HZ);
- WARN_ON(res <= 0);
-}
-
-/**
- * To do: Find out whether stop_session() has a meaning for transports
- * other than iSCSI.
- */
-static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
- int conn_sleep)
-{
-}
-
-static void srpt_reset_nexus(struct se_session *sess)
-{
- printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
-}
-
-static int srpt_sess_logged_in(struct se_session *se_sess)
-{
- return true;
-}
-
-/**
- * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
- *
- * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
- * This object represents an arbitrary integer used to uniquely identify a
- * particular attached remote initiator port to a particular SCSI target port
- * within a particular SCSI target device within a particular SCSI instance.
- */
-static u32 srpt_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
-{
-}
-
-static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return ioctx->tag;
-}
-
-/* Note: only used from inside debug printk's by the TCM core. */
-static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
-{
- struct srpt_send_ioctx *ioctx;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx);
-}
-
-static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
-{
- return 0;
-}
-
-static u16 srpt_get_fabric_sense_len(void)
-{
- return 0;
-}
-
-static int srpt_is_state_remove(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-/**
- * srpt_parse_i_port_id() - Parse an initiator port ID.
- * @name: ASCII representation of a 128-bit initiator port ID.
- * @i_port_id: Binary 128-bit port ID.
- */
-static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
-{
- const char *p;
- unsigned len, count, leading_zero_bytes;
- int ret, rc;
-
- p = name;
- if (strnicmp(p, "0x", 2) == 0)
- p += 2;
- ret = -EINVAL;
- len = strlen(p);
- if (len % 2)
- goto out;
- count = min(len / 2, 16U);
- leading_zero_bytes = 16 - count;
- memset(i_port_id, 0, leading_zero_bytes);
- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
- if (rc < 0)
- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
- ret = 0;
-out:
- return ret;
-}
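
For reference, the parsing rules of srpt_parse_i_port_id() above (optional "0x" prefix, an even number of hex digits, at most 16 bytes, left-padded with zero bytes) can be mirrored in a stand-alone user-space sketch. Everything below is hypothetical illustration, not driver code, and unlike the kernel helper it rejects malformed hex instead of merely logging it:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Sketch of the same parsing rules as srpt_parse_i_port_id(). */
    static int parse_i_port_id(uint8_t i_port_id[16], const char *name)
    {
        const char *p = name;
        size_t len, count, pad, i;
        unsigned int byte;

        if (strncasecmp(p, "0x", 2) == 0)
            p += 2;
        len = strlen(p);
        if (len % 2)                    /* an odd number of hex digits is invalid */
            return -1;
        count = len / 2 < 16 ? len / 2 : 16;
        pad = 16 - count;               /* short IDs are left-padded with zero bytes */
        memset(i_port_id, 0, pad);
        for (i = 0; i < count; i++) {
            if (sscanf(p + 2 * i, "%2x", &byte) != 1)
                return -1;
            i_port_id[pad + i] = (uint8_t)byte;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t id[16];
        size_t i;

        /* The ID below is a made-up example, not a real initiator port. */
        if (parse_i_port_id(id, "0x00000000000000021c903000e8c3f2aa") == 0) {
            for (i = 0; i < sizeof(id); i++)
                printf("%02x", id[i]);
            putchar('\n');              /* prints the 16-byte ID, zero-padded */
        }
        return 0;
    }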
-
-/*
- * configfs callback function invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
- */
-static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
- struct config_group *group,
- const char *name)
-{
- struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct srpt_node_acl *nacl;
- int ret = 0;
- u32 nexus_depth = 1;
- u8 i_port_id[16];
-
- if (srpt_parse_i_port_id(i_port_id, name) < 0) {
- printk(KERN_ERR "invalid initiator port ID %s\n", name);
- ret = -EINVAL;
- goto err;
- }
-
- se_nacl_new = srpt_alloc_fabric_acl(tpg);
- if (!se_nacl_new) {
- ret = -ENOMEM;
- goto err;
- }
- /*
- * nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a node ACL from demo mode to explicit
- */
- se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
- nexus_depth);
- if (IS_ERR(se_nacl)) {
- ret = PTR_ERR(se_nacl);
- goto err;
- }
- /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
- nacl->sport = sport;
-
- spin_lock_irq(&sport->port_acl_lock);
- list_add_tail(&nacl->list, &sport->port_acl_list);
- spin_unlock_irq(&sport->port_acl_lock);
-
- return se_nacl;
-err:
- return ERR_PTR(ret);
-}
-
-/*
- * configfs callback function invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
- */
-static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
-{
- struct srpt_node_acl *nacl;
- struct srpt_device *sdev;
- struct srpt_port *sport;
-
- nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
- sport = nacl->sport;
- sdev = sport->sdev;
- spin_lock_irq(&sport->port_acl_lock);
- list_del(&nacl->list);
- spin_unlock_irq(&sport->port_acl_lock);
- core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
- srpt_release_fabric_acl(NULL, se_nacl);
-}
-
-static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
-
- return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
-}
-
-static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(page, 0, &val);
- if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
- return -EINVAL;
- }
- if (val > MAX_SRPT_RDMA_SIZE) {
- pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
- MAX_SRPT_RDMA_SIZE);
- return -EINVAL;
- }
- if (val < DEFAULT_MAX_RDMA_SIZE) {
- pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
- val, DEFAULT_MAX_RDMA_SIZE);
- return -EINVAL;
- }
- sport->port_attrib.srp_max_rdma_size = val;
-
- return count;
-}
-
-TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
-
-static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
-
- return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
-}
-
-static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(page, 0, &val);
- if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
- return -EINVAL;
- }
- if (val > MAX_SRPT_RSP_SIZE) {
- pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
- MAX_SRPT_RSP_SIZE);
- return -EINVAL;
- }
- if (val < MIN_MAX_RSP_SIZE) {
- pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
- MIN_MAX_RSP_SIZE);
- return -EINVAL;
- }
- sport->port_attrib.srp_max_rsp_size = val;
-
- return count;
-}
-
-TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
-
-static ssize_t srpt_tpg_attrib_show_srp_sq_size(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
-
- return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
-}
-
-static ssize_t srpt_tpg_attrib_store_srp_sq_size(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(page, 0, &val);
- if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
- return -EINVAL;
- }
- if (val > MAX_SRPT_SRQ_SIZE) {
- pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
- MAX_SRPT_SRQ_SIZE);
- return -EINVAL;
- }
- if (val < MIN_SRPT_SRQ_SIZE) {
- pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
- MIN_SRPT_SRQ_SIZE);
- return -EINVAL;
- }
- sport->port_attrib.srp_sq_size = val;
-
- return count;
-}
-
-TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
-
-static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
- &srpt_tpg_attrib_srp_max_rdma_size.attr,
- &srpt_tpg_attrib_srp_max_rsp_size.attr,
- &srpt_tpg_attrib_srp_sq_size.attr,
- NULL,
-};
-
-static ssize_t srpt_tpg_show_enable(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
-
- return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
-}
-
-static ssize_t srpt_tpg_store_enable(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
-{
- struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
- unsigned long tmp;
- int ret;
-
- ret = strict_strtoul(page, 0, &tmp);
- if (ret < 0) {
- printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
- return -EINVAL;
- }
-
- if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
- return -EINVAL;
- }
- if (tmp == 1)
- sport->enabled = true;
- else
- sport->enabled = false;
-
- return count;
-}
-
-TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
-
-static struct configfs_attribute *srpt_tpg_attrs[] = {
- &srpt_tpg_enable.attr,
- NULL,
-};
-
-/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg
- */
-static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
-{
- struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
- int res;
-
- /* Initialize sport->port_wwn and sport->port_tpg_1 */
- res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
- &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
- if (res)
- return ERR_PTR(res);
-
- return &sport->port_tpg_1;
-}
-
-/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg
- */
-static void srpt_drop_tpg(struct se_portal_group *tpg)
-{
- struct srpt_port *sport = container_of(tpg,
- struct srpt_port, port_tpg_1);
-
- sport->enabled = false;
- core_tpg_deregister(&sport->port_tpg_1);
-}
-
-/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port
- */
-static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
- struct config_group *group,
- const char *name)
-{
- struct srpt_port *sport;
- int ret;
-
- sport = srpt_lookup_port(name);
- pr_debug("make_tport(%s)\n", name);
- ret = -EINVAL;
- if (!sport)
- goto err;
-
- return &sport->port_wwn;
-
-err:
- return ERR_PTR(ret);
-}
-
-/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port
- */
-static void srpt_drop_tport(struct se_wwn *wwn)
-{
- struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
-
- pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
-}
-
-static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
-}
-
-TF_WWN_ATTR_RO(srpt, version);
-
-static struct configfs_attribute *srpt_wwn_attrs[] = {
- &srpt_wwn_version.attr,
- NULL,
-};
-
-static struct target_core_fabric_ops srpt_template = {
- .get_fabric_name = srpt_get_fabric_name,
- .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
- .tpg_get_wwn = srpt_get_fabric_wwn,
- .tpg_get_tag = srpt_get_tag,
- .tpg_get_default_depth = srpt_get_default_depth,
- .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
- .tpg_check_demo_mode = srpt_check_false,
- .tpg_check_demo_mode_cache = srpt_check_true,
- .tpg_check_demo_mode_write_protect = srpt_check_true,
- .tpg_check_prod_mode_write_protect = srpt_check_false,
- .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
- .tpg_release_fabric_acl = srpt_release_fabric_acl,
- .tpg_get_inst_index = srpt_tpg_get_inst_index,
- .release_cmd = srpt_release_cmd,
- .check_stop_free = srpt_check_stop_free,
- .shutdown_session = srpt_shutdown_session,
- .close_session = srpt_close_session,
- .stop_session = srpt_stop_session,
- .fall_back_to_erl0 = srpt_reset_nexus,
- .sess_logged_in = srpt_sess_logged_in,
- .sess_get_index = srpt_sess_get_index,
- .sess_get_initiator_sid = NULL,
- .write_pending = srpt_write_pending,
- .write_pending_status = srpt_write_pending_status,
- .set_default_node_attributes = srpt_set_default_node_attrs,
- .get_task_tag = srpt_get_task_tag,
- .get_cmd_state = srpt_get_tcm_cmd_state,
- .queue_data_in = srpt_queue_response,
- .queue_status = srpt_queue_status,
- .queue_tm_rsp = srpt_queue_response,
- .get_fabric_sense_len = srpt_get_fabric_sense_len,
- .set_fabric_sense_len = srpt_set_fabric_sense_len,
- .is_state_remove = srpt_is_state_remove,
- /*
- * Setup function pointers for generic logic in
- * target_core_fabric_configfs.c
- */
- .fabric_make_wwn = srpt_make_tport,
- .fabric_drop_wwn = srpt_drop_tport,
- .fabric_make_tpg = srpt_make_tpg,
- .fabric_drop_tpg = srpt_drop_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = srpt_make_nodeacl,
- .fabric_drop_nodeacl = srpt_drop_nodeacl,
-};
-
-/**
- * srpt_init_module() - Kernel module initialization.
- *
- * Note: Since ib_register_client() registers callback functions, and since at
- * least one of these callback functions (srpt_add_one()) calls target core
- * functions, this driver must be registered with the target core before
- * ib_register_client() is called.
- */
-static int __init srpt_init_module(void)
-{
- int ret;
-
- ret = -EINVAL;
- if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
- " srp_max_req_size -- must be at least %d.\n",
- srp_max_req_size, MIN_MAX_REQ_SIZE);
- goto out;
- }
-
- if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
- || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
- " srpt_srq_size -- must be in the range [%d..%d].\n",
- srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
- goto out;
- }
-
- spin_lock_init(&srpt_dev_lock);
- INIT_LIST_HEAD(&srpt_dev_list);
-
- ret = -ENODEV;
- srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
- if (!srpt_target) {
- printk(KERN_ERR "couldn't register\n");
- goto out;
- }
-
- srpt_target->tf_ops = srpt_template;
-
- /* Enable SG chaining */
- srpt_target->tf_ops.task_sg_chaining = true;
-
- /*
- * Set up default attribute lists.
- */
- srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(srpt_target);
- if (ret < 0) {
- printk(KERN_ERR "couldn't register\n");
- goto out_free_target;
- }
-
- ret = ib_register_client(&srpt_client);
- if (ret) {
- printk(KERN_ERR "couldn't register IB client\n");
- goto out_unregister_target;
- }
-
- return 0;
-
-out_unregister_target:
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
-out_free_target:
- if (srpt_target)
- target_fabric_configfs_free(srpt_target);
-out:
- return ret;
-}
-
-static void __exit srpt_cleanup_module(void)
-{
- ib_unregister_client(&srpt_client);
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
-}
-
-module_init(srpt_init_module);
-module_exit(srpt_cleanup_module);
diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h
deleted file mode 100644
index b4b4bbcd7f16..000000000000
--- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
- * Copyright (C) 2009 - 2010 Bart Van Assche .
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef IB_SRPT_H
-#define IB_SRPT_H
-
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-#include
-
-#include "ib_dm_mad.h"
-
-/*
- * The prefix the ServiceName field must start with in the device management
- * ServiceEntries attribute pair. See also the SRP specification.
- */
-#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
-
-enum {
- /*
- * SRP IOControllerProfile attributes for SRP target ports that have
- * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
- * in the SRP specification.
- */
- SRP_PROTOCOL = 0x0108,
- SRP_PROTOCOL_VERSION = 0x0001,
- SRP_IO_SUBCLASS = 0x609e,
- SRP_SEND_TO_IOC = 0x01,
- SRP_SEND_FROM_IOC = 0x02,
- SRP_RDMA_READ_FROM_IOC = 0x08,
- SRP_RDMA_WRITE_FROM_IOC = 0x20,
-
- /*
- * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
- * specification.
- */
- SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
- SRP_LOSOLNT = 0x10, /* logout solicited notification */
- SRP_CRSOLNT = 0x20, /* credit request solicited notification */
- SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
-
- /*
- * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
- * 18 and 20 in the SRP specification.
- */
- SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
- SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
-
- /*
- * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
- * 16 and 22 in the SRP specification.
- */
- SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
-
- /* See also table 24 in the SRP specification. */
- SRP_TSK_MGMT_SUCCESS = 0x00,
- SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
- SRP_TSK_MGMT_FAILED = 0x05,
-
- /* See also table 21 in the SRP specification. */
- SRP_CMD_SIMPLE_Q = 0x0,
- SRP_CMD_HEAD_OF_Q = 0x1,
- SRP_CMD_ORDERED_Q = 0x2,
- SRP_CMD_ACA = 0x4,
-
- SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
- SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
-
- SRPT_DEF_SG_TABLESIZE = 128,
- SRPT_DEF_SG_PER_WQE = 16,
-
- MIN_SRPT_SQ_SIZE = 16,
- DEF_SRPT_SQ_SIZE = 4096,
- SRPT_RQ_SIZE = 128,
- MIN_SRPT_SRQ_SIZE = 4,
- DEFAULT_SRPT_SRQ_SIZE = 4095,
- MAX_SRPT_SRQ_SIZE = 65535,
- MAX_SRPT_RDMA_SIZE = 1U << 24,
- MAX_SRPT_RSP_SIZE = 1024,
-
- MIN_MAX_REQ_SIZE = 996,
- DEFAULT_MAX_REQ_SIZE
- = sizeof(struct srp_cmd)/*48*/
- + sizeof(struct srp_indirect_buf)/*20*/
- + 128 * sizeof(struct srp_direct_buf)/*16*/,
-
- MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
- DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
-
- DEFAULT_MAX_RDMA_SIZE = 65536,
-};
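
The size constants above can be cross-checked against the byte counts quoted in the enum's own comments. The snippet below is only an arithmetic sanity check (a stand-alone sketch; the sizes are the commented values, not sizeof() of the real SRP headers):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* Byte counts quoted in the comments of the enum above. */
        const int srp_cmd = 48, srp_indirect_buf = 20, srp_direct_buf = 16;
        const int srp_rsp = 36;

        /* DEFAULT_MAX_REQ_SIZE: one SRP_CMD IU plus an indirect descriptor
         * table holding 128 direct buffer descriptors. */
        assert(srp_cmd + srp_indirect_buf + 128 * srp_direct_buf == 2116);

        /* DEFAULT_MAX_RSP_SIZE = 256 leaves 256 - 36 = 220 bytes for sense
         * data, as the comment next to it states. */
        assert(256 - srp_rsp == 220);

        /* MIN_MAX_RSP_SIZE is the response header plus four bytes. */
        assert(srp_rsp + 4 == 40);

        printf("size constants are self-consistent\n");
        return 0;
    }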
-
-enum srpt_opcode {
- SRPT_RECV,
- SRPT_SEND,
- SRPT_RDMA_MID,
- SRPT_RDMA_ABORT,
- SRPT_RDMA_READ_LAST,
- SRPT_RDMA_WRITE_LAST,
-};
-
-static inline u64 encode_wr_id(u8 opcode, u32 idx)
-{
- return ((u64)opcode << 32) | idx;
-}
-static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
-{
- return wr_id >> 32;
-}
-static inline u32 idx_from_wr_id(u64 wr_id)
-{
- return (u32)wr_id;
-}
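
Every work request ID in this driver is just an (opcode, ring index) pair packed into a u64: the opcode lives in bits 63..32 and the index in bits 31..0. A minimal round-trip demo of that packing (stand-alone sketch mirroring the three inline helpers above):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t encode_wr_id(uint8_t opcode, uint32_t idx)
    {
        return ((uint64_t)opcode << 32) | idx;  /* opcode in the high half */
    }

    int main(void)
    {
        /* 5 stands in for SRPT_RDMA_WRITE_LAST, 42 for a send-ring index. */
        uint64_t wr_id = encode_wr_id(5, 42);

        printf("opcode=%u idx=%u\n",
               (unsigned)(wr_id >> 32),         /* opcode_from_wr_id() */
               (unsigned)(uint32_t)wr_id);      /* idx_from_wr_id() */
        return 0;                               /* prints: opcode=5 idx=42 */
    }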
-
-struct rdma_iu {
- u64 raddr;
- u32 rkey;
- struct ib_sge *sge;
- u32 sge_cnt;
- int mem_id;
-};
-
-/**
- * enum srpt_command_state - SCSI command state managed by SRPT.
- * @SRPT_STATE_NEW: New command arrived and is being processed.
- * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
- * for data arrival.
- * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
- * being processed.
- * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
- * @SRPT_STATE_MGMT: Processing a SCSI task management command.
- * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
- * @SRPT_STATE_DONE: Command processing finished successfully, command
- * processing has been aborted or command processing
- * failed.
- */
-enum srpt_command_state {
- SRPT_STATE_NEW = 0,
- SRPT_STATE_NEED_DATA = 1,
- SRPT_STATE_DATA_IN = 2,
- SRPT_STATE_CMD_RSP_SENT = 3,
- SRPT_STATE_MGMT = 4,
- SRPT_STATE_MGMT_RSP_SENT = 5,
- SRPT_STATE_DONE = 6,
-};
-
-/**
- * struct srpt_ioctx - Shared SRPT I/O context information.
- * @buf: Pointer to the buffer.
- * @dma: DMA address of the buffer.
- * @index: Index of the I/O context in its ioctx_ring array.
- */
-struct srpt_ioctx {
- void *buf;
- dma_addr_t dma;
- uint32_t index;
-};
-
-/**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
- * @ioctx: See above.
- * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
- */
-struct srpt_recv_ioctx {
- struct srpt_ioctx ioctx;
- struct list_head wait_list;
-};
-
-/**
- * struct srpt_send_ioctx - SRPT send I/O context.
- * @ioctx: See above.
- * @ch: Channel pointer.
- * @free_list: Node in srpt_rdma_ch.free_list.
- * @n_rbuf: Number of data buffers in the received SRP command.
- * @rbufs: Pointer to SRP data buffer array.
- * @single_rbuf: SRP data buffer if the command has only a single buffer.
- * @sg: Pointer to sg-list associated with this I/O context.
- * @sg_cnt: SG-list size.
- * @mapped_sg_count: ib_dma_map_sg() return value.
- * @n_rdma_ius: Number of elements in the rdma_ius array.
- * @rdma_ius: Array with information about the RDMA mapping.
- * @tag: Tag of the received SRP information unit.
- * @spinlock: Protects 'state'.
- * @state: I/O context state.
- * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
- * the already initiated transfers have finished.
- * @cmd: Target core command data structure.
- * @sense_data: SCSI sense data.
- */
-struct srpt_send_ioctx {
- struct srpt_ioctx ioctx;
- struct srpt_rdma_ch *ch;
- struct kref kref;
- struct rdma_iu *rdma_ius;
- struct srp_direct_buf *rbufs;
- struct srp_direct_buf single_rbuf;
- struct scatterlist *sg;
- struct list_head free_list;
- spinlock_t spinlock;
- enum srpt_command_state state;
- bool rdma_aborted;
- struct se_cmd cmd;
- struct completion tx_done;
- u64 tag;
- int sg_cnt;
- int mapped_sg_count;
- u16 n_rdma_ius;
- u8 n_rdma;
- u8 n_rbuf;
- bool queue_status_only;
- u8 sense_data[SCSI_SENSE_BUFFERSIZE];
-};
-
-/**
- * enum rdma_ch_state - SRP channel state.
- * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
- * @CH_LIVE: QP is in RTS state.
- * @CH_DISCONNECTING: DREQ has been received; waiting for DREP
- * or DREQ has been sent and waiting for DREP.
- * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
- * @CH_RELEASING: Last WQE event has been received; releasing resources.
- */
-enum rdma_ch_state {
- CH_CONNECTING,
- CH_LIVE,
- CH_DISCONNECTING,
- CH_DRAINING,
- CH_RELEASING
-};
-
-/**
- * struct srpt_rdma_ch - RDMA channel.
- * @wait_queue: Allows the kernel thread to wait for more work.
- * @thread: Kernel thread that processes the IB queues associated with
- * the channel.
- * @cm_id: IB CM ID associated with the channel.
- * @qp: IB queue pair used for communicating over this channel.
- * @cq: IB completion queue for this channel.
- * @rq_size: IB receive queue size.
- * @rsp_size: IB response message size in bytes.
- * @sq_wr_avail: number of work requests available in the send queue.
- * @sport: pointer to the information of the HCA port used by this
- * channel.
- * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
- * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
- * @max_ti_iu_len: maximum target-to-initiator information unit length.
- * @req_lim: request limit: maximum number of requests that may be sent
- * by the initiator without having received a response.
- * @req_lim_delta: Number of credits not yet sent back to the initiator.
- * @spinlock: Protects free_list and state.
- * @free_list: Head of list with free send I/O contexts.
- * @state: channel state. See also enum rdma_ch_state.
- * @ioctx_ring: Send ring.
- * @wc: IB work completion array for srpt_process_completion().
- * @list: Node for insertion in the srpt_device.rch_list list.
- * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
- * list contains struct srpt_ioctx elements and is protected
- * against concurrent modification by the cm_id spinlock.
- * @sess: Session information associated with this SRP channel.
- * @sess_name: Session name.
- * @release_work: Allows scheduling of srpt_release_channel().
- * @release_done: Enables waiting for srpt_release_channel() completion.
- */
-struct srpt_rdma_ch {
- wait_queue_head_t wait_queue;
- struct task_struct *thread;
- struct ib_cm_id *cm_id;
- struct ib_qp *qp;
- struct ib_cq *cq;
- int rq_size;
- u32 rsp_size;
- atomic_t sq_wr_avail;
- struct srpt_port *sport;
- u8 i_port_id[16];
- u8 t_port_id[16];
- int max_ti_iu_len;
- atomic_t req_lim;
- atomic_t req_lim_delta;
- spinlock_t spinlock;
- struct list_head free_list;
- enum rdma_ch_state state;
- struct srpt_send_ioctx **ioctx_ring;
- struct ib_wc wc[16];
- struct list_head list;
- struct list_head cmd_wait_list;
- struct se_session *sess;
- u8 sess_name[36];
- struct work_struct release_work;
- struct completion *release_done;
-};
-
-/**
- * struct srpt_port_attrib - Attributes for SRPT port
- * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
- * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
- * @srp_sq_size: IB send queue (SQ) size for new channels.
- */
-struct srpt_port_attrib {
- u32 srp_max_rdma_size;
- u32 srp_max_rsp_size;
- u32 srp_sq_size;
-};
-
-/**
- * struct srpt_port - Information associated by SRPT with a single IB port.
- * @sdev: backpointer to the HCA information.
- * @mad_agent: per-port management datagram processing information.
- * @enabled: Whether or not this target port is enabled.
- * @port_guid: ASCII representation of Port GUID
- * @port: one-based port number.
- * @sm_lid: cached value of the port's sm_lid.
- * @lid: cached value of the port's lid.
- * @gid: cached value of the port's gid.
- * @port_acl_lock: Spinlock protecting port_acl_list.
- * @work: work structure for refreshing the aforementioned cached values.
- * @port_tpg_1: Target portal group = 1 data.
- * @port_wwn: Target core WWN data.
- * @port_acl_list: Head of the list with all node ACLs for this port.
- */
-struct srpt_port {
- struct srpt_device *sdev;
- struct ib_mad_agent *mad_agent;
- bool enabled;
- u8 port_guid[64];
- u8 port;
- u16 sm_lid;
- u16 lid;
- union ib_gid gid;
- spinlock_t port_acl_lock;
- struct work_struct work;
- struct se_portal_group port_tpg_1;
- struct se_wwn port_wwn;
- struct list_head port_acl_list;
- struct srpt_port_attrib port_attrib;
-};
-
-/**
- * struct srpt_device - Information associated by SRPT with a single HCA.
- * @device: Backpointer to the struct ib_device managed by the IB core.
- * @pd: IB protection domain.
- * @mr: L_Key (local key) with write access to all local memory.
- * @srq: Per-HCA SRQ (shared receive queue).
- * @cm_id: Connection identifier.
- * @dev_attr: Attributes of the InfiniBand device as obtained during the
- * ib_client.add() callback.
- * @srq_size: SRQ size.
- * @ioctx_ring: Per-HCA SRQ.
- * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
- * @ch_releaseQ: Enables waiting for removal from rch_list.
- * @spinlock: Protects rch_list and tpg.
- * @port: Information about the ports owned by this HCA.
- * @event_handler: Per-HCA asynchronous IB event handler.
- * @list: Node in srpt_dev_list.
- */
-struct srpt_device {
- struct ib_device *device;
- struct ib_pd *pd;
- struct ib_mr *mr;
- struct ib_srq *srq;
- struct ib_cm_id *cm_id;
- struct ib_device_attr dev_attr;
- int srq_size;
- struct srpt_recv_ioctx **ioctx_ring;
- struct list_head rch_list;
- wait_queue_head_t ch_releaseQ;
- spinlock_t spinlock;
- struct srpt_port port[2];
- struct ib_event_handler event_handler;
- struct list_head list;
-};
-
-/**
- * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
- * @i_port_id: 128-bit SRP initiator port ID.
- * @sport: port information.
- * @nacl: Target core node ACL information.
- * @list: Element of the per-HCA ACL list.
- */
-struct srpt_node_acl {
- u8 i_port_id[16];
- struct srpt_port *sport;
- struct se_node_acl nacl;
- struct list_head list;
-};
-
-/*
- * SRP-related SCSI persistent reservation definitions.
- *
- * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
- * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
- * SCSI over an RDMA interface).
- */
-
-enum {
- SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
-};
-
-struct spc_rdma_transport_id {
- uint8_t protocol_identifier;
- uint8_t reserved[7];
- uint8_t i_port_id[16];
-};
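
This layout is what srpt_get_pr_transport_id() and srpt_parse_pr_out_transport_id(), removed earlier in this diff, read and write. A stand-alone sketch can confirm the 24-byte TransportID size and the offset of the initiator port ID expected by SPC-4 (the constants are quoted from the sections cited in the comment above):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct spc_rdma_transport_id {
        uint8_t protocol_identifier;    /* 4 = SRP over an RDMA interface */
        uint8_t reserved[7];
        uint8_t i_port_id[16];
    };

    int main(void)
    {
        /* SPC-4: the SRP TransportID is 24 bytes long and carries the
         * 16-byte initiator port identifier starting at byte 8. */
        assert(sizeof(struct spc_rdma_transport_id) == 24);
        assert(offsetof(struct spc_rdma_transport_id, i_port_id) == 8);
        return 0;
    }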
-
-#endif /* IB_SRPT_H */
diff --git a/trunk/drivers/media/common/tuners/tuner-xc2028.c b/trunk/drivers/media/common/tuners/tuner-xc2028.c
index b5ee3ebfcfca..27555995f7e4 100644
--- a/trunk/drivers/media/common/tuners/tuner-xc2028.c
+++ b/trunk/drivers/media/common/tuners/tuner-xc2028.c
@@ -24,21 +24,6 @@
#include
#include "dvb_frontend.h"
-/* Registers (Write-only) */
-#define XREG_INIT 0x00
-#define XREG_RF_FREQ 0x02
-#define XREG_POWER_DOWN 0x08
-
-/* Registers (Read-only) */
-#define XREG_FREQ_ERROR 0x01
-#define XREG_LOCK 0x02
-#define XREG_VERSION 0x04
-#define XREG_PRODUCT_ID 0x08
-#define XREG_HSYNC_FREQ 0x10
-#define XREG_FRAME_LINES 0x20
-#define XREG_SNR 0x40
-
-#define XREG_ADC_ENV 0x0100
static int debug;
module_param(debug, int, 0644);
@@ -900,7 +885,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
- rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
+ rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
if (rc < 0)
goto ret;
@@ -909,7 +894,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
signal = 1 << 11;
/* Get SNR of the video signal */
- rc = xc2028_get_reg(priv, XREG_SNR, &signal);
+ rc = xc2028_get_reg(priv, 0x0040, &signal);
if (rc < 0)
goto ret;
@@ -1034,9 +1019,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
/* CMD= Set frequency */
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
if (rc < 0)
goto ret;
@@ -1216,9 +1201,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
mutex_lock(&priv->lock);
if (priv->firm_version < 0x0202)
- rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
+ rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
else
- rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
+ rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
priv->cur_fw.type = 0; /* need firmware reload */
diff --git a/trunk/drivers/media/common/tuners/xc4000.c b/trunk/drivers/media/common/tuners/xc4000.c
index 68397110b7d9..d218c1d68c33 100644
--- a/trunk/drivers/media/common/tuners/xc4000.c
+++ b/trunk/drivers/media/common/tuners/xc4000.c
@@ -154,8 +154,6 @@ struct xc4000_priv {
#define XREG_SNR 0x06
#define XREG_VERSION 0x07
#define XREG_PRODUCT_ID 0x08
-#define XREG_SIGNAL_LEVEL 0x0A
-#define XREG_NOISE_LEVEL 0x0B
/*
Basic firmware description. This will remain with
@@ -488,16 +486,6 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
return xc4000_readreg(priv, XREG_QUALITY, quality);
}
-static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
-{
- return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
-}
-
-static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
-{
- return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
-}
-
static u16 xc_wait_for_lock(struct xc4000_priv *priv)
{
u16 lock_state = 0;
@@ -1101,8 +1089,6 @@ static void xc_debug_dump(struct xc4000_priv *priv)
u32 hsync_freq_hz = 0;
u16 frame_lines;
u16 quality;
- u16 signal = 0;
- u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
@@ -1133,12 +1119,6 @@ static void xc_debug_dump(struct xc4000_priv *priv)
xc_get_quality(priv, &quality);
dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
-
- xc_get_signal_level(priv, &signal);
- dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
-
- xc_get_noise_level(priv, &noise);
- dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
}
static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1452,71 +1432,6 @@ static int xc4000_set_analog_params(struct dvb_frontend *fe,
return ret;
}
-static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
-{
- struct xc4000_priv *priv = fe->tuner_priv;
- u16 value = 0;
- int rc;
-
- mutex_lock(&priv->lock);
- rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
- mutex_unlock(&priv->lock);
-
- if (rc < 0)
- goto ret;
-
- /* Information from real testing of the DVB-T and radio part;
- the coefficient for one dB is 0xff.
- */
- tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
-
- /* all known digital modes */
- if ((priv->video_standard == XC4000_DTV6) ||
- (priv->video_standard == XC4000_DTV7) ||
- (priv->video_standard == XC4000_DTV7_8) ||
- (priv->video_standard == XC4000_DTV8))
- goto digital;
-
- /* In analog mode the NOISE LEVEL is what matters; the signal level
- depends only on the gain of the antenna and amplifiers and tells
- nothing about the real quality of reception.
- */
- mutex_lock(&priv->lock);
- rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
- mutex_unlock(&priv->lock);
-
- tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
-
- /* highest noise level: 32dB */
- if (value >= 0x2000) {
- value = 0;
- } else {
- value = ~value << 3;
- }
-
- goto ret;
-
- /* In digital mode the SIGNAL LEVEL is what matters; the real noise
- level is stored in the demodulator registers.
- */
-digital:
- /* best signal: -50dB */
- if (value <= 0x3200) {
- value = 0xffff;
- /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
- } else if (value >= 0x713A) {
- value = 0;
- } else {
- value = ~(value - 0x3200) << 2;
- }
-
-ret:
- *strength = value;
-
- return rc;
-}
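
For the digital modes, the removed code maps the raw XREG_SIGNAL_LEVEL reading (attenuation in 1/256 dB steps) onto the 0..0xffff strength scale. A stand-alone sketch of just that mapping, with a few sample readings (illustrative values only, not measurements):

    #include <stdint.h>
    #include <stdio.h>

    /* Digital-mode mapping removed above: 0x3200 is -50 dB (best signal),
     * 0x713A is the real zero (about -113 dB). */
    static uint16_t digital_strength(uint16_t value)
    {
        if (value <= 0x3200)
            return 0xffff;
        if (value >= 0x713A)
            return 0;
        return (uint16_t)(((uint16_t)~(value - 0x3200)) << 2);
    }

    int main(void)
    {
        printf("-50dB  -> 0x%04x\n", digital_strength(50 * 256));   /* 0xffff */
        printf("-80dB  -> 0x%04x\n", digital_strength(80 * 256));   /* mid-scale */
        printf("-114dB -> 0x%04x\n", digital_strength(114 * 256));  /* 0x0000 */
        return 0;
    }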
-
static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
@@ -1644,7 +1559,6 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
.set_params = xc4000_set_params,
.set_analog_params = xc4000_set_analog_params,
.get_frequency = xc4000_get_frequency,
- .get_rf_strength = xc4000_get_signal,
.get_bandwidth = xc4000_get_bandwidth,
.get_status = xc4000_get_status
};
diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
index fbbe545a74cb..b15db4fe347b 100644
--- a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,11 +904,8 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int i;
- u32 delsys;
- delsys = c->delivery_system;
memset(c, 0, sizeof(struct dtv_frontend_properties));
- c->delivery_system = delsys;
c->state = DTV_CLEAR;
@@ -1012,6 +1009,25 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
+ _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
+ _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
+ _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
+ _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
+ _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
+ _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
+
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
_DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
@@ -1397,15 +1413,6 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
enum dvbv3_emulation_type type;
- /*
- * It was reported that some old DVBv5 applications were
- * filling delivery_system with SYS_UNDEFINED. If this happens,
- * assume that the application wants to use the first supported
- * delivery system.
- */
- if (c->delivery_system == SYS_UNDEFINED)
- c->delivery_system = fe->ops.delsys[0];
-
if (desired_system == SYS_UNDEFINED) {
/*
* A DVBv3 call doesn't know what's the desired system.
@@ -1725,7 +1732,6 @@ static int dvb_frontend_ioctl_properties(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
@@ -1792,14 +1798,9 @@ static int dvb_frontend_ioctl_properties(struct file *file,
/*
* Fills the cache out struct with the cache contents, plus
- * the data retrieved from get_frontend, if the frontend
- * is not idle. Otherwise, returns the cached content
+ * the data retrieved from get_frontend.
*/
- if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, NULL);
- if (err < 0)
- goto out;
- }
+ dtv_get_frontend(fe, NULL);
for (i = 0; i < tvps->num; i++) {
err = dtv_property_process_get(fe, c, tvp + i, file);
if (err < 0)
diff --git a/trunk/drivers/media/dvb/dvb-usb/anysee.c b/trunk/drivers/media/dvb/dvb-usb/anysee.c
index 1455e2644ab5..d66192974d68 100644
--- a/trunk/drivers/media/dvb/dvb-usb/anysee.c
+++ b/trunk/drivers/media/dvb/dvb-usb/anysee.c
@@ -877,18 +877,24 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
case ANYSEE_HW_508T2C: /* 20 */
/* E7 T2C */
- if (state->fe_id)
- break;
-
/* enable DVB-T/T2/C demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
- /* attach demod */
- adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
- &anysee_cxd2820r_config, &adap->dev->i2c_adap,
- NULL);
+ if (state->fe_id == 0) {
+ /* DVB-T/T2 */
+ adap->fe_adap[state->fe_id].fe =
+ dvb_attach(cxd2820r_attach,
+ &anysee_cxd2820r_config,
+ &adap->dev->i2c_adap, NULL);
+ } else {
+ /* DVB-C */
+ adap->fe_adap[state->fe_id].fe =
+ dvb_attach(cxd2820r_attach,
+ &anysee_cxd2820r_config,
+ &adap->dev->i2c_adap, adap->fe_adap[0].fe);
+ }
state->has_ci = true;
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700.h b/trunk/drivers/media/dvb/dvb-usb/dib0700.h
index 7de125c0b36f..9bd6d51b3b93 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700.h
@@ -48,8 +48,6 @@ struct dib0700_state {
u8 disable_streaming_master_mode;
u32 fw_version;
u32 nb_packet_buffer_size;
- int (*read_status)(struct dvb_frontend *, fe_status_t *);
- int (*sleep)(struct dvb_frontend* fe);
u8 buf[255];
};
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
index 070e82aa53f5..206999476f02 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -834,7 +834,6 @@ static struct usb_driver dib0700_driver = {
module_usb_driver(dib0700_driver);
-MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
MODULE_AUTHOR("Patrick Boettcher ");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
MODULE_VERSION("1.0");
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f9e966aa26e7..81ef4b46f790 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3066,25 +3066,19 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
}
};
-static void stk7070pd_init(struct dvb_usb_device *dev)
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
{
- dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
msleep(10);
- dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
- dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
- dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
- dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
- dib0700_ctrl_clock(dev, 72, 1);
+ dib0700_ctrl_clock(adap->dev, 72, 1);
msleep(10);
- dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
-}
-
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
-{
- stk7070pd_init(adap->dev);
-
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
msleep(10);
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
@@ -3105,77 +3099,6 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
-static int novatd_read_status_override(struct dvb_frontend *fe,
- fe_status_t *stat)
-{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- struct dvb_usb_device *dev = adap->dev;
- struct dib0700_state *state = dev->priv;
- int ret;
-
- ret = state->read_status(fe, stat);
-
- if (!ret)
- dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
- !!(*stat & FE_HAS_LOCK));
-
- return ret;
-}
-
-static int novatd_sleep_override(struct dvb_frontend* fe)
-{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- struct dvb_usb_device *dev = adap->dev;
- struct dib0700_state *state = dev->priv;
-
- /* turn off LED */
- dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
-
- return state->sleep(fe);
-}
-
-/**
- * novatd_frontend_attach - Nova-TD specific attach
- *
- * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for
- * information purposes.
- */
-static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
-{
- struct dvb_usb_device *dev = adap->dev;
- struct dib0700_state *st = dev->priv;
-
- if (adap->id == 0) {
- stk7070pd_init(dev);
-
- /* turn the power LED on, the other two off (just in case) */
- dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
- dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
- dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
-
- if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
- stk7070pd_dib7000p_config) != 0) {
- err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
- __func__);
- return -ENODEV;
- }
- }
-
- adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
- adap->id == 0 ? 0x80 : 0x82,
- &stk7070pd_dib7000p_config[adap->id]);
-
- if (adap->fe_adap[0].fe == NULL)
- return -ENODEV;
-
- st->read_status = adap->fe_adap[0].fe->ops.read_status;
- adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
- st->sleep = adap->fe_adap[0].fe->ops.sleep;
- adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
-
- return 0;
-}
-
/* S5H1411 */
static struct s5h1411_config pinnacle_801e_config = {
.output_mode = S5H1411_PARALLEL_OUTPUT,
@@ -3938,57 +3861,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
- .num_adapters = 2,
- .adapter = {
- {
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
- .pid_filter_count = 32,
- .pid_filter = stk70x0p_pid_filter,
- .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
- .frontend_attach = novatd_frontend_attach,
- .tuner_attach = dib7070p_tuner_attach,
-
- DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
- }},
- .size_of_priv = sizeof(struct dib0700_adapter_state),
- }, {
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
- .pid_filter_count = 32,
- .pid_filter = stk70x0p_pid_filter,
- .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
- .frontend_attach = novatd_frontend_attach,
- .tuner_attach = dib7070p_tuner_attach,
-
- DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
- }},
- .size_of_priv = sizeof(struct dib0700_adapter_state),
- }
- },
-
- .num_device_descs = 1,
- .devices = {
- { "Hauppauge Nova-TD Stick (52009)",
- { &dib0700_usb_id_table[35], NULL },
- { NULL },
- },
- },
-
- .rc.core = {
- .rc_interval = DEFAULT_RC_INTERVAL,
- .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
- .module_name = "dib0700",
- .rc_query = dib0700_rc_query_old_firmware,
- .allowed_protos = RC_TYPE_RC5 |
- RC_TYPE_RC6 |
- RC_TYPE_NEC,
- .change_protocol = dib0700_change_protocol,
- },
- }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
-
.num_adapters = 2,
.adapter = {
{
@@ -4020,7 +3892,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 5,
+ .num_device_descs = 6,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -4030,6 +3902,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[18], NULL },
{ NULL },
},
+ { "Hauppauge Nova-TD Stick (52009)",
+ { &dib0700_usb_id_table[35], NULL },
+ { NULL },
+ },
{ "Hauppauge Nova-TD-500 (84xxx)",
{ &dib0700_usb_id_table[36], NULL },
{ NULL },
diff --git a/trunk/drivers/media/dvb/frontends/cxd2820r_core.c b/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
index caae7f79c837..93e1b12e7907 100644
--- a/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/trunk/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -309,14 +309,9 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
static int cxd2820r_get_frontend(struct dvb_frontend *fe)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
-
- if (priv->delivery_system == SYS_UNDEFINED)
- return 0;
-
switch (fe->dtv_property_cache.delivery_system) {
case SYS_DVBT:
ret = cxd2820r_get_frontend_t(fe);
@@ -481,10 +476,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
/* switch between DVB-T and DVB-T2 when tune fails */
- if (priv->last_tune_failed) {
+ if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
if (priv->delivery_system == SYS_DVBT)
c->delivery_system = SYS_DVBT2;
- else if (priv->delivery_system == SYS_DVBT2)
+ else
c->delivery_system = SYS_DVBT;
}
@@ -497,7 +492,6 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
/* frontend lock wait loop count */
switch (priv->delivery_system) {
case SYS_DVBT:
- case SYS_DVBC_ANNEX_A:
i = 20;
break;
case SYS_DVBT2:
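
A condensed sketch of the retry logic the cxd2820r_search() hunk arrives at: after a failed tune, toggle between DVB-T and DVB-T2, but leave a DVB-C selection untouched. The helper below is illustrative; only the symbols SYS_DVBT, SYS_DVBT2 and SYS_DVBC_ANNEX_A come from the hunk.

/* Illustrative: choose the delivery system for the next tune attempt,
 * toggling T <-> T2 after a failure and never touching DVB-C.
 */
static fe_delivery_system_t next_delsys(fe_delivery_system_t cur,
					bool last_tune_failed)
{
	if (!last_tune_failed || cur == SYS_DVBC_ANNEX_A)
		return cur;

	return (cur == SYS_DVBT) ? SYS_DVBT2 : SYS_DVBT;
}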
diff --git a/trunk/drivers/media/dvb/frontends/ds3000.c b/trunk/drivers/media/dvb/frontends/ds3000.c
index af65d013db11..938777065de6 100644
--- a/trunk/drivers/media/dvb/frontends/ds3000.c
+++ b/trunk/drivers/media/dvb/frontends/ds3000.c
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
for (i = 0; i < 30 ; i++) {
ds3000_read_status(fe, &status);
- if (status & FE_HAS_LOCK)
+ if (status && FE_HAS_LOCK)
break;
msleep(10);
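
The ds3000 hunk touches a typical lock-wait loop. A hedged sketch of that idiom: poll read_status() a bounded number of times and test the FE_HAS_LOCK flag with a bitwise mask, since the status word is a bitmask of FE_HAS_* flags. The wrapper function name and return convention are illustrative.

/* Illustrative lock-wait loop: poll up to 30 times, 10 ms apart,
 * testing the FE_HAS_LOCK bit of the status bitmask.
 */
static int wait_for_lock(struct dvb_frontend *fe)
{
	fe_status_t status = 0;
	int i;

	for (i = 0; i < 30; i++) {
		fe->ops.read_status(fe, &status);
		if (status & FE_HAS_LOCK)	/* bitwise test of the flag */
			return 0;
		msleep(10);
	}

	return -ETIMEDOUT;
}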
diff --git a/trunk/drivers/media/dvb/frontends/mb86a20s.c b/trunk/drivers/media/dvb/frontends/mb86a20s.c
index fade566927c3..7fa3e472cdca 100644
--- a/trunk/drivers/media/dvb/frontends/mb86a20s.c
+++ b/trunk/drivers/media/dvb/frontends/mb86a20s.c
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
[2] = 0x8e, /* Layer C */
};
- if (layer >= ARRAY_SIZE(reg))
+ if (layer > ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
[2] = 0x8f, /* Layer C */
};
- if (layer >= ARRAY_SIZE(reg))
+ if (layer > ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
[2] = 0x90, /* Layer C */
};
- if (layer >= ARRAY_SIZE(reg))
+ if (layer > ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
[2] = 0x91, /* Layer C */
};
- if (layer >= ARRAY_SIZE(reg))
+ if (layer > ARRAY_SIZE(reg))
return -EINVAL;
rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
if (rc < 0)
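
The mb86a20s hunks all revolve around the same guard: a three-entry, per-layer register table indexed by layer (A, B, C). A hedged sketch of that lookup; for a zero-based index into such a table the safe rejection test is index >= ARRAY_SIZE(table), the form on the removed lines, because ARRAY_SIZE is already one past the last valid index. Only the Layer C value 0x8e and the 0x6d selector register appear in the hunks; the other table entries below are placeholders.

/* Illustrative: select the per-layer register, rejecting out-of-range
 * indexes.  With a 3-entry table the valid indexes are 0..2, so the
 * guard uses ">=" (3 itself is already out of range).
 */
static int select_layer_reg(struct mb86a20s_state *state, unsigned layer)
{
	static const u8 reg[] = {
		0x86,	/* Layer A (placeholder value) */
		0x8a,	/* Layer B (placeholder value) */
		0x8e,	/* Layer C, as in the hunk above */
	};

	if (layer >= ARRAY_SIZE(reg))
		return -EINVAL;

	return mb86a20s_writereg(state, 0x6d, reg[layer]);
}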
diff --git a/trunk/drivers/media/dvb/frontends/tda18271c2dd.c b/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
index ad7c72e8f517..86da3d816498 100644
--- a/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
+++ b/trunk/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include