diff --git a/[refs] b/[refs]
index 7eb0a017b089..e6a9d7fbf776 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 21a32816b2e13eafb6d8a4589a84c6e629adc392
+refs/heads/master: 94d1ac8b55799be10487fff9766cce6d6628462a
diff --git a/trunk/.gitignore b/trunk/.gitignore
index 5d56a3fd0de6..8faa6c02b39e 100644
--- a/trunk/.gitignore
+++ b/trunk/.gitignore
@@ -28,7 +28,6 @@ modules.builtin
*.gz
*.bz2
*.lzma
-*.xz
*.lzo
*.patch
*.gcno
diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl
index c27915893974..2861055afd7a 100644
--- a/trunk/Documentation/DocBook/drm.tmpl
+++ b/trunk/Documentation/DocBook/drm.tmpl
@@ -73,8 +73,8 @@
services.
- The core of every DRM driver is struct drm_driver. Drivers
- will typically statically initialize a drm_driver structure,
+ The core of every DRM driver is struct drm_device. Drivers
+ will typically statically initialize a drm_device structure,
then pass it to drm_init() at load time.
@@ -84,7 +84,7 @@
Driver initialization
Before calling the DRM initialization routines, the driver must
- first create and fill out a struct drm_driver structure.
+ first create and fill out a struct drm_device structure.
static struct drm_driver driver = {
diff --git a/trunk/Documentation/DocBook/filesystems.tmpl b/trunk/Documentation/DocBook/filesystems.tmpl
index f51f28531b8d..5e87ad58c0b5 100644
--- a/trunk/Documentation/DocBook/filesystems.tmpl
+++ b/trunk/Documentation/DocBook/filesystems.tmpl
@@ -82,11 +82,6 @@
-
- Events based on file descriptors
-!Efs/eventfd.c
-
-
The Filesystem for Exporting Kernel Objects
!Efs/sysfs/file.c
diff --git a/trunk/Documentation/hwmon/jc42 b/trunk/Documentation/hwmon/jc42
index a22ecf48f255..0e76ef12e4c6 100644
--- a/trunk/Documentation/hwmon/jc42
+++ b/trunk/Documentation/hwmon/jc42
@@ -51,8 +51,7 @@ Supported chips:
* JEDEC JC 42.4 compliant temperature sensor chips
Prefix: 'jc42'
Addresses scanned: I2C 0x18 - 0x1f
- Datasheet:
- http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
+ Datasheet: -
Author:
Guenter Roeck
@@ -61,11 +60,7 @@ Author:
Description
-----------
-This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
-which are used on many DDR3 memory modules for mobile devices and servers. Some
-systems use the sensor to prevent memory overheating by automatically throttling
-the memory controller.
-
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
The driver auto-detects the chips listed above, but can be manually instantiated
to support other JC 42.4 compliant chips.
@@ -86,19 +81,15 @@ limits. The chip supports only a single register to configure the hysteresis,
which applies to all limits. This register can be written by writing into
temp1_crit_hyst. Other hysteresis attributes are read-only.
-If the BIOS has configured the sensor for automatic temperature management, it
-is likely that it has locked the registers, i.e., that the temperature limits
-cannot be changed.
-
Sysfs entries
-------------
temp1_input Temperature (RO)
-temp1_min Minimum temperature (RO or RW)
-temp1_max Maximum temperature (RO or RW)
-temp1_crit Critical high temperature (RO or RW)
+temp1_min Minimum temperature (RW)
+temp1_max Maximum temperature (RW)
+temp1_crit Critical high temperature (RW)
-temp1_crit_hyst Critical hysteresis temperature (RO or RW)
+temp1_crit_hyst Critical hysteresis temperature (RW)
temp1_max_hyst Maximum hysteresis temperature (RO)
temp1_min_alarm Temperature low alarm
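For context, the sysfs attributes listed above are plain-text files reporting millidegrees Celsius. A minimal userspace sketch of reading temp1_input; the hwmon path is an assumption (the index varies per system, and older kernels place the attributes under a device/ sub-directory):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical path: locate the real jc42 device through the
		 * hwmon "name" attribute rather than relying on an index */
		const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
		FILE *f = fopen(path, "r");
		long mdegc;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%ld", &mdegc) != 1) {
			fprintf(stderr, "%s: unexpected contents\n", path);
			return 1;
		}
		fclose(f);
		printf("temp1_input = %.3f degC\n", mdegc / 1000.0);
		return 0;
	}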
diff --git a/trunk/Documentation/hwmon/k10temp b/trunk/Documentation/hwmon/k10temp
index d2b56a4fd1f5..6526eee525a6 100644
--- a/trunk/Documentation/hwmon/k10temp
+++ b/trunk/Documentation/hwmon/k10temp
@@ -9,8 +9,6 @@ Supported chips:
Socket S1G3: Athlon II, Sempron, Turion II
* AMD Family 11h processors:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
-* AMD Family 12h processors: "Llano"
-* AMD Family 14h processors: "Brazos" (C/E/G-Series)
Prefix: 'k10temp'
Addresses scanned: PCI space
@@ -19,14 +17,10 @@ Supported chips:
http://support.amd.com/us/Processor_TechDocs/31116.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41256.pdf
- BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
- http://support.amd.com/us/Processor_TechDocs/43170.pdf
Revision Guide for AMD Family 10h Processors:
http://support.amd.com/us/Processor_TechDocs/41322.pdf
Revision Guide for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41788.pdf
- Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
- http://support.amd.com/us/Processor_TechDocs/47534.pdf
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
http://support.amd.com/us/Processor_TechDocs/43373.pdf
AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -40,7 +34,7 @@ Description
-----------
This driver permits reading of the internal temperature sensor of AMD
-Family 10h/11h/12h/14h processors.
+Family 10h and 11h processors.
All these processors have a sensor, but on those for Socket F or AM2+,
the sensor may return inconsistent values (erratum 319). The driver
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index f4a04c0c7edc..89835a4766a6 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -144,11 +144,6 @@ a fixed number of characters. This limit depends on the architecture
and is between 256 and 4096 characters. It is defined in the file
./include/asm/setup.h as COMMAND_LINE_SIZE.
-Finally, the [KMG] suffix is commonly described after a number of kernel
-parameter values. These 'K', 'M', and 'G' letters represent the _binary_
-multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
-bytes respectively. Such letter suffixes can also be entirely omitted.
-
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
@@ -550,20 +545,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format:
,,,[,]
- crashkernel=size[KMG][@offset[KMG]]
- [KNL] Using kexec, Linux can switch to a 'crash kernel'
- upon panic. This parameter reserves the physical
- memory region [offset, offset + size] for that kernel
- image. If '@offset' is omitted, then a suitable offset
- is selected automatically. Check
- Documentation/kdump/kdump.txt for further details.
+ crashkernel=nn[KMG]@ss[KMG]
+ [KNL] Reserve a chunk of physical memory to
+ hold a kernel to switch to with kexec on panic.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
in the running system. The syntax of range is
start-[end] where start and end are both
a memory unit (amount[KMG]). See also
- Documentation/kdump/kdump.txt for an example.
+ Documentation/kdump/kdump.txt for a example.
cs89x0_dma= [HW,NET]
Format:
@@ -1271,9 +1262,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 (KERN_INFO) informational
7 (KERN_DEBUG) debug-level messages
- log_buf_len=n[KMG] Sets the size of the printk ring buffer,
- in bytes. n must be a power of two. The default
- size is set in the kernel config file.
+ log_buf_len=n Sets the size of the printk ring buffer, in bytes.
+ Format: { n | nk | nM }
+ n must be a power of two. The default size
+ is set in the kernel config file.
logo.nologo [FB] Disables display of the built-in Linux logo.
This may be used to provide more screen space for
diff --git a/trunk/Documentation/networking/00-INDEX b/trunk/Documentation/networking/00-INDEX
index 4edd78dfb362..fe5c099b8fc8 100644
--- a/trunk/Documentation/networking/00-INDEX
+++ b/trunk/Documentation/networking/00-INDEX
@@ -40,6 +40,8 @@ decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
+dgrs.txt
+ - the Digi International RightSwitch SE-X Ethernet driver
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
e100.txt
@@ -48,6 +50,8 @@ e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
eql.txt
- serial IP load balancing
+ethertap.txt
+ - the Ethertap user space packet reception and transmission driver
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
filter.txt
@@ -100,6 +104,8 @@ tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
+wavelan.txt
+ - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
x25.txt
- general info on X.25 development.
x25-iface.txt
diff --git a/trunk/Documentation/networking/Makefile b/trunk/Documentation/networking/Makefile
index 24c308dd3fd1..5aba7a33aeeb 100644
--- a/trunk/Documentation/networking/Makefile
+++ b/trunk/Documentation/networking/Makefile
@@ -4,8 +4,6 @@ obj- := dummy.o
# List of programs to build
hostprogs-y := ifenslave
-HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
-
# Tell kbuild to always build the programs
always := $(hostprogs-y)
diff --git a/trunk/Documentation/networking/dns_resolver.txt b/trunk/Documentation/networking/dns_resolver.txt
index 04ca06325b08..aefd1e681804 100644
--- a/trunk/Documentation/networking/dns_resolver.txt
+++ b/trunk/Documentation/networking/dns_resolver.txt
@@ -61,6 +61,7 @@ before the more general line given above as the first match is the one taken.
create dns_resolver foo:* * /usr/sbin/dns.foo %k
+
=====
USAGE
=====
@@ -103,14 +104,6 @@ implemented in the module can be called after doing:
returned also.
-===============================
-READING DNS KEYS FROM USERSPACE
-===============================
-
-Keys of dns_resolver type can be read from userspace using keyctl_read() or
-"keyctl read/print/pipe".
-
-
=========
MECHANISM
=========
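The section removed above referred to reading dns_resolver keys from userspace with keyctl_read() or "keyctl read/print/pipe". A minimal sketch using the keyutils library (request_key(3), keyctl_read_alloc(3)); the hostname description and the choice of the session keyring are illustrative assumptions:

	#include <stdio.h>
	#include <stdlib.h>
	#include <keyutils.h>		/* link with -lkeyutils */

	int main(int argc, char *argv[])
	{
		key_serial_t key;
		void *payload;
		long len;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <hostname>\n", argv[0]);
			return 1;
		}

		/* upcalls to /sbin/request-key if the key is not already cached */
		key = request_key("dns_resolver", argv[1], NULL,
				  KEY_SPEC_SESSION_KEYRING);
		if (key < 0) {
			perror("request_key");
			return 1;
		}

		/* equivalent of "keyctl read": fetch the resolved payload */
		len = keyctl_read_alloc(key, &payload);
		if (len < 0) {
			perror("keyctl_read_alloc");
			return 1;
		}
		printf("%.*s\n", (int)len, (char *)payload);
		free(payload);
		return 0;
	}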
diff --git a/trunk/Documentation/devicetree/booting-without-of.txt b/trunk/Documentation/powerpc/booting-without-of.txt
similarity index 90%
rename from trunk/Documentation/devicetree/booting-without-of.txt
rename to trunk/Documentation/powerpc/booting-without-of.txt
index 28b1c9d3d351..7400d7555dc3 100644
--- a/trunk/Documentation/devicetree/booting-without-of.txt
+++ b/trunk/Documentation/powerpc/booting-without-of.txt
@@ -13,6 +13,7 @@ Table of Contents
I - Introduction
1) Entry point for arch/powerpc
+ 2) Board support
II - The DT block format
1) Header
@@ -40,6 +41,13 @@ Table of Contents
VI - System-on-a-chip devices and nodes
1) Defining child nodes of an SOC
2) Representing devices without a current OF specification
+ a) PHY nodes
+ b) Interrupt controllers
+ c) 4xx/Axon EMAC ethernet nodes
+ d) Xilinx IP cores
+ e) USB EHCI controllers
+ f) MDIO on GPIOs
+ g) SPI busses
VII - Specifying interrupt information for devices
1) interrupts property
@@ -115,7 +123,7 @@ Revision Information
I - Introduction
================
-During the development of the Linux/ppc64 kernel, and more
+During the recent development of the Linux/ppc64 kernel, and more
specifically, the addition of new platform types outside of the old
IBM pSeries/iSeries pair, it was decided to enforce some strict rules
regarding the kernel entry and bootloader <-> kernel interfaces, in
@@ -138,7 +146,7 @@ section III, but, for example, the kernel does not require you to
create a node for every PCI device in the system. It is a requirement
to have a node for PCI host bridges in order to provide interrupt
routing informations and memory/IO ranges, among others. It is also
-recommended to define nodes for on chip devices and other buses that
+recommended to define nodes for on chip devices and other busses that
don't specifically fit in an existing OF specification. This creates a
great flexibility in the way the kernel can then probe those and match
drivers to device, without having to hard code all sorts of tables. It
@@ -150,7 +158,7 @@ it with special cases.
1) Entry point for arch/powerpc
-------------------------------
- There is one single entry point to the kernel, at the start
+ There is one and one single entry point to the kernel, at the start
of the kernel image. That entry point supports two calling
conventions:
@@ -202,6 +210,12 @@ it with special cases.
with all CPUs. The way to do that with method b) will be
described in a later revision of this document.
+
+2) Board support
+----------------
+
+64-bit kernels:
+
Board supports (platforms) are not exclusive config options. An
arbitrary set of board supports can be built in a single kernel
image. The kernel will "know" what set of functions to use for a
@@ -220,11 +234,48 @@ it with special cases.
containing the various callbacks that the generic code will
use to get to your platform specific code
- A kernel image may support multiple platforms, but only if the
+ c) Add a reference to your "ppc_md" structure in the
+ "machines" table in arch/powerpc/kernel/setup_64.c if you are
+ a 64-bit platform.
+
+ d) request and get assigned a platform number (see PLATFORM_*
+ constants in arch/powerpc/include/asm/processor.h
+
+32-bit embedded kernels:
+
+ Currently, board support is essentially an exclusive config option.
+ The kernel is configured for a single platform. Part of the reason
+ for this is to keep kernels on embedded systems small and efficient;
+ part of this is due to the fact the code is already that way. In the
+ future, a kernel may support multiple platforms, but only if the
platforms feature the same core architecture. A single kernel build
cannot support both configurations with Book E and configurations
with classic Powerpc architectures.
+ 32-bit embedded platforms that are moved into arch/powerpc using a
+ flattened device tree should adopt the merged tree practice of
+ setting ppc_md up dynamically, even though the kernel is currently
+ built with support for only a single platform at a time. This allows
+ unification of the setup code, and will make it easier to go to a
+ multiple-platform-support model in the future.
+
+NOTE: I believe the above will be true once Ben's done with the merge
+of the boot sequences.... someone speak up if this is wrong!
+
+ To add a 32-bit embedded platform support, follow the instructions
+ for 64-bit platforms above, with the exception that the Kconfig
+ option should be set up such that the kernel builds exclusively for
+ the platform selected. The processor type for the platform should
+ enable another config option to select the specific board
+ supported.
+
+NOTE: If Ben doesn't merge the setup files, may need to change this to
+point to setup_32.c
+
+
+ I will describe later the boot process and various callbacks that
+ your platform should implement.
+
II - The DT block format
========================
@@ -249,8 +300,8 @@ the block to RAM before passing it to the kernel.
1) Header
---------
- The kernel is passed the physical address pointing to an area of memory
- that is roughly described in include/linux/of_fdt.h by the structure
+ The kernel is entered with r3 pointing to an area of memory that is
+ roughly described in arch/powerpc/include/asm/prom.h by the structure
boot_param_header:
struct boot_param_header {
@@ -288,7 +339,7 @@ struct boot_param_header {
All values in this header are in big endian format, the various
fields in this header are defined more precisely below. All
"offset" values are in bytes from the start of the header; that is
- from the physical base address of the device tree block.
+ from the value of r3.
- magic
@@ -386,7 +437,7 @@ struct boot_param_header {
------------------------------
- base -> | struct boot_param_header |
+ r3 -> | struct boot_param_header |
------------------------------
| (alignment gap) (*) |
------------------------------
@@ -406,7 +457,7 @@ struct boot_param_header {
-----> ------------------------------
|
|
- --- (base + totalsize)
+ --- (r3 + totalsize)
(*) The alignment gaps are not necessarily present; their presence
and size are dependent on the various alignment requirements of
@@ -449,7 +500,7 @@ the device-tree structure. It is typically used to represent "path" in
the device-tree. More details about the actual format of these will be
below.
-The kernel generic code does not make any formal use of the
+The kernel powerpc generic code does not make any formal use of the
unit address (though some board support code may do) so the only real
requirement here for the unit address is to ensure uniqueness of
the node unit name at a given level of the tree. Nodes with no notion
@@ -467,21 +518,20 @@ path to the root node is "/".
Every node which actually represents an actual device (that is, a node
which isn't only a virtual "container" for more nodes, like "/cpus"
-is) is also required to have a "compatible" property indicating the
-specific hardware and an optional list of devices it is fully
-backwards compatible with.
+is) is also required to have a "device_type" property indicating the
+type of node .
Finally, every node that can be referenced from a property in another
-node is required to have either a "phandle" or a "linux,phandle"
-property. Real Open Firmware implementations provide a unique
-"phandle" value for every node that the "prom_init()" trampoline code
-turns into "linux,phandle" properties. However, this is made optional
-if the flattened device tree is used directly. An example of a node
+node is required to have a "linux,phandle" property. Real open
+firmware implementations provide a unique "phandle" value for every
+node that the "prom_init()" trampoline code turns into
+"linux,phandle" properties. However, this is made optional if the
+flattened device tree is used directly. An example of a node
referencing another node via "phandle" is when laying out the
interrupt tree which will be described in a further version of this
document.
-The "phandle" property is a 32-bit value that uniquely
+This "linux, phandle" property is a 32-bit value that uniquely
identifies a node. You are free to use whatever values or system of
values, internal pointers, or whatever to generate these, the only
requirement is that every node for which you provide that property has
@@ -644,7 +694,7 @@ made of 3 cells, the bottom two containing the actual address itself
while the top cell contains address space indication, flags, and pci
bus & device numbers.
-For buses that support dynamic allocation, it's the accepted practice
+For busses that support dynamic allocation, it's the accepted practice
to then not provide the address in "reg" (keep it 0) though while
providing a flag indicating the address is dynamically allocated, and
then, to provide a separate "assigned-addresses" property that
@@ -661,7 +711,7 @@ prom_parse.c file of the recent kernels for your bus type.
The "reg" property only defines addresses and sizes (if #size-cells is
non-0) within a given bus. In order to translate addresses upward
(that is into parent bus addresses, and possibly into CPU physical
-addresses), all buses must contain a "ranges" property. If the
+addresses), all busses must contain a "ranges" property. If the
"ranges" property is missing at a given level, it's assumed that
translation isn't possible, i.e., the registers are not visible on the
parent bus. The format of the "ranges" property for a bus is a list
@@ -677,9 +727,9 @@ example, for a PCI host controller, that would be a CPU address. For a
PCI<->ISA bridge, that would be a PCI address. It defines the base
address in the parent bus where the beginning of that range is mapped.
-For new 64-bit board support, I recommend either the 2/2 format or
+For a new 64-bit powerpc board, I recommend either the 2/2 format or
Apple's 2/1 format which is slightly more compact since sizes usually
-fit in a single 32-bit word. New 32-bit board support should use a
+fit in a single 32-bit word. New 32-bit powerpc boards should use a
1/1 format, unless the processor supports physical addresses greater
than 32-bits, in which case a 2/1 format is recommended.
@@ -704,7 +754,7 @@ of their actual names.
While earlier users of Open Firmware like OldWorld macintoshes tended
to use the actual device name for the "name" property, it's nowadays
considered a good practice to use a name that is closer to the device
-class (often equal to device_type). For example, nowadays, Ethernet
+class (often equal to device_type). For example, nowadays, ethernet
controllers are named "ethernet", an additional "model" property
defining precisely the chip type/model, and "compatible" property
defining the family in case a single driver can driver more than one
@@ -722,7 +772,7 @@ is present).
4) Note about node and property names and character set
-------------------------------------------------------
-While Open Firmware provides more flexible usage of 8859-1, this
+While open firmware provides more flexible usage of 8859-1, this
specification enforces more strict rules. Nodes and properties should
be comprised only of ASCII characters 'a' to 'z', '0' to
'9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally
@@ -742,7 +792,7 @@ address which can extend beyond that limit.
--------------------------------
These are all that are currently required. However, it is strongly
recommended that you expose PCI host bridges as documented in the
- PCI binding to Open Firmware, and your interrupt tree as documented
+ PCI binding to open firmware, and your interrupt tree as documented
in OF interrupt tree specification.
a) The root node
@@ -752,12 +802,20 @@ address which can extend beyond that limit.
- model : this is your board name/model
- #address-cells : address representation for "root" devices
- #size-cells: the size representation for "root" devices
+ - device_type : This property shouldn't be necessary. However, if
+ you decide to create a device_type for your root node, make sure it
+ is _not_ "chrp" unless your platform is a pSeries or PAPR compliant
+ one for 64-bit, or a CHRP-type machine for 32-bit as this will
+ matched by the kernel this way.
+
+ Additionally, some recommended properties are:
+
- compatible : the board "family" generally finds its way here,
for example, if you have 2 board models with a similar layout,
that typically get driven by the same platform code in the
- kernel, you would specify the exact board model in the
- compatible property followed by an entry that represents the SoC
- model.
+ kernel, you would use a different "model" property but put a
+ value in "compatible". The kernel doesn't directly use that
+ value but it is generally useful.
The root node is also generally where you add additional properties
specific to your board like the serial number if any, that sort of
@@ -783,11 +841,8 @@ address which can extend beyond that limit.
So under /cpus, you are supposed to create a node for every CPU on
the machine. There is no specific restriction on the name of the
- CPU, though it's common to call it <architecture>,<core>. For
+ CPU, though It's common practice to call it PowerPC,<name>. For
example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX.
- However, the Generic Names convention suggests that it would be
- better to simply use 'cpu' for each cpu node and use the compatible
- property to identify the specific cpu core.
Required properties:
@@ -868,7 +923,7 @@ compatibility.
e) The /chosen node
- This node is a bit "special". Normally, that's where Open Firmware
+ This node is a bit "special". Normally, that's where open firmware
puts some variable environment information, like the arguments, or
the default input/output devices.
@@ -885,7 +940,11 @@ compatibility.
console device if any. Typically, if you have serial devices on
your board, you may want to put the full path to the one set as
the default console in the firmware here, for the kernel to pick
- it up as its own default console.
+ it up as its own default console. If you look at the function
+ set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
+ that the kernel tries to find out the default console and has
+ knowledge of various types like 8250 serial ports. You may want
+ to extend this function to add your own.
Note that u-boot creates and fills in the chosen node for platforms
that use it.
@@ -896,23 +955,23 @@ compatibility.
f) the /soc node
- This node is used to represent a system-on-a-chip (SoC) and must be
- present if the processor is a SoC. The top-level soc node contains
- information that is global to all devices on the SoC. The node name
- should contain a unit address for the SoC, which is the base address
- of the memory-mapped register set for the SoC. The name of an SoC
+ This node is used to represent a system-on-a-chip (SOC) and must be
+ present if the processor is a SOC. The top-level soc node contains
+ information that is global to all devices on the SOC. The node name
+ should contain a unit address for the SOC, which is the base address
+ of the memory-mapped register set for the SOC. The name of an soc
node should start with "soc", and the remainder of the name should
represent the part number for the soc. For example, the MPC8540's
soc node would be called "soc8540".
Required properties:
+ - device_type : Should be "soc"
- ranges : Should be defined as specified in 1) to describe the
- translation of SoC addresses for memory mapped SoC registers.
- - bus-frequency: Contains the bus frequency for the SoC node.
+ translation of SOC addresses for memory mapped SOC registers.
+ - bus-frequency: Contains the bus frequency for the SOC node.
Typically, the value of this field is filled in by the boot
loader.
- - compatible : Exact model of the SoC
Recommended properties:
@@ -1096,13 +1155,12 @@ while all this has been defined and implemented.
- An example of code for iterating nodes & retrieving properties
directly from the flattened tree format can be found in the kernel
- file drivers/of/fdt.c. Look at the of_scan_flat_dt() function,
+ file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function,
its usage in early_init_devtree(), and the corresponding various
early_init_dt_scan_*() callbacks. That code can be re-used in a
GPL bootloader, and as the author of that code, I would be happy
to discuss possible free licensing to any vendor who wishes to
integrate all or part of this code into a non-GPL bootloader.
- (reference needed; who is 'I' here? ---gcl Jan 31, 2011)
@@ -1145,19 +1203,18 @@ MPC8540.
2) Representing devices without a current OF specification
----------------------------------------------------------
-Currently, there are many devices on SoCs that do not have a standard
-representation defined as part of the Open Firmware specifications,
-mainly because the boards that contain these SoCs are not currently
-booted using Open Firmware. Binding documentation for new devices
-should be added to the Documentation/devicetree/bindings directory.
-That directory will expand as device tree support is added to more and
-more SoCs.
-
+Currently, there are many devices on SOCs that do not have a standard
+representation pre-defined as part of the open firmware
+specifications, mainly because the boards that contain these SOCs are
+not currently booted using open firmware. This section contains
+descriptions for the SOC devices for which new nodes have been
+defined; this list will expand as more and more SOC-containing
+platforms are moved over to use the flattened-device-tree model.
VII - Specifying interrupt information for devices
===================================================
-The device tree represents the buses and devices of a hardware
+The device tree represents the busses and devices of a hardware
system in a form similar to the physical bus topology of the
hardware.
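For reference, the boot_param_header structure discussed in the hunks above has roughly the following layout; this is a sketch based on the kernel header the text points to, with all fields stored big-endian, and the kernel's own definition should be treated as authoritative:

	#include <linux/types.h>

	struct boot_param_header {
		__be32	magic;			/* magic word OF_DT_HEADER */
		__be32	totalsize;		/* total size of DT block */
		__be32	off_dt_struct;		/* offset to structure block */
		__be32	off_dt_strings;		/* offset to strings block */
		__be32	off_mem_rsvmap;		/* offset to memory reserve map */
		__be32	version;
		__be32	last_comp_version;	/* last compatible version */
		/* version 2 fields below */
		__be32	boot_cpuid_phys;	/* physical CPU id we're booting on */
		/* version 3 fields below */
		__be32	dt_strings_size;	/* size of the strings block */
		/* version 17 fields below */
		__be32	dt_struct_size;		/* size of the structure block */
	};

All "offset" values mentioned in the text are byte offsets from the start of this header, and the block ends at that base address plus totalsize.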
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/cpm.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
rename to trunk/Documentation/powerpc/dts-bindings/4xx/cpm.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/4xx/emac.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/emac.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
rename to trunk/Documentation/powerpc/dts-bindings/4xx/emac.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
rename to trunk/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
rename to trunk/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt b/trunk/Documentation/powerpc/dts-bindings/4xx/reboot.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
rename to trunk/Documentation/powerpc/dts-bindings/4xx/reboot.txt
diff --git a/trunk/Documentation/devicetree/bindings/net/can/sja1000.txt b/trunk/Documentation/powerpc/dts-bindings/can/sja1000.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/net/can/sja1000.txt
rename to trunk/Documentation/powerpc/dts-bindings/can/sja1000.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt b/trunk/Documentation/powerpc/dts-bindings/ecm.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
rename to trunk/Documentation/powerpc/dts-bindings/ecm.txt
diff --git a/trunk/Documentation/devicetree/bindings/eeprom.txt b/trunk/Documentation/powerpc/dts-bindings/eeprom.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/eeprom.txt
rename to trunk/Documentation/powerpc/dts-bindings/eeprom.txt
diff --git a/trunk/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
diff --git a/trunk/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/board.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/board.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/board.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/board.txt
diff --git a/trunk/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/can.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/can.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/diu.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/diu.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/diu.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt
diff --git a/trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/gtm.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/gtm.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/guts.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/guts.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/guts.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/guts.txt
diff --git a/trunk/Documentation/devicetree/bindings/i2c/fsl-i2c.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/i2c.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/i2c/fsl-i2c.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/i2c.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/lbc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/lbc.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mcm.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/mcm.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/mpic.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/mpic.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/pmc.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/pmc.txt
diff --git a/trunk/Documentation/devicetree/bindings/ata/fsl-sata.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/sata.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/ata/fsl-sata.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/sata.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/sec.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/sec.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/sec.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/sec.txt
diff --git a/trunk/Documentation/devicetree/bindings/spi/fsl-spi.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/spi.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/spi/fsl-spi.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/spi.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/ssi.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/ssi.txt
diff --git a/trunk/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/tsec.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/tsec.txt
diff --git a/trunk/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt
diff --git a/trunk/Documentation/devicetree/bindings/usb/fsl-usb.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/usb.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/usb/fsl-usb.txt
rename to trunk/Documentation/powerpc/dts-bindings/fsl/usb.txt
diff --git a/trunk/Documentation/devicetree/bindings/gpio/gpio.txt b/trunk/Documentation/powerpc/dts-bindings/gpio/gpio.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/gpio/gpio.txt
rename to trunk/Documentation/powerpc/dts-bindings/gpio/gpio.txt
diff --git a/trunk/Documentation/devicetree/bindings/gpio/led.txt b/trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/gpio/led.txt
rename to trunk/Documentation/powerpc/dts-bindings/gpio/led.txt
diff --git a/trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt b/trunk/Documentation/powerpc/dts-bindings/gpio/mdio.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/net/mdio-gpio.txt
rename to trunk/Documentation/powerpc/dts-bindings/gpio/mdio.txt
diff --git a/trunk/Documentation/devicetree/bindings/marvell.txt b/trunk/Documentation/powerpc/dts-bindings/marvell.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/marvell.txt
rename to trunk/Documentation/powerpc/dts-bindings/marvell.txt
diff --git a/trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/trunk/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
rename to trunk/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt
diff --git a/trunk/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/trunk/Documentation/powerpc/dts-bindings/mtd-physmap.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
rename to trunk/Documentation/powerpc/dts-bindings/mtd-physmap.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt b/trunk/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
rename to trunk/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
diff --git a/trunk/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt b/trunk/Documentation/powerpc/dts-bindings/nintendo/wii.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
rename to trunk/Documentation/powerpc/dts-bindings/nintendo/wii.txt
diff --git a/trunk/Documentation/devicetree/bindings/net/phy.txt b/trunk/Documentation/powerpc/dts-bindings/phy.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/net/phy.txt
rename to trunk/Documentation/powerpc/dts-bindings/phy.txt
diff --git a/trunk/Documentation/devicetree/bindings/spi/spi-bus.txt b/trunk/Documentation/powerpc/dts-bindings/spi-bus.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/spi/spi-bus.txt
rename to trunk/Documentation/powerpc/dts-bindings/spi-bus.txt
diff --git a/trunk/Documentation/devicetree/bindings/usb/usb-ehci.txt b/trunk/Documentation/powerpc/dts-bindings/usb-ehci.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/usb/usb-ehci.txt
rename to trunk/Documentation/powerpc/dts-bindings/usb-ehci.txt
diff --git a/trunk/Documentation/devicetree/bindings/xilinx.txt b/trunk/Documentation/powerpc/dts-bindings/xilinx.txt
similarity index 100%
rename from trunk/Documentation/devicetree/bindings/xilinx.txt
rename to trunk/Documentation/powerpc/dts-bindings/xilinx.txt
diff --git a/trunk/Documentation/rtc.txt b/trunk/Documentation/rtc.txt
index 250160469d83..9104c1062084 100644
--- a/trunk/Documentation/rtc.txt
+++ b/trunk/Documentation/rtc.txt
@@ -178,29 +178,38 @@ RTC class framework, but can't be supported by the older driver.
setting the longer alarm time and enabling its IRQ using a single
request (using the same model as EFI firmware).
- * RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, the RTC framework
- will emulate this mechanism.
+ * RTC_UIE_ON, RTC_UIE_OFF ... if the RTC offers IRQs, it probably
+ also offers update IRQs whenever the "seconds" counter changes.
+ If needed, the RTC framework can emulate this mechanism.
- * RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... these icotls
- are emulated via a kernel hrtimer.
+ * RTC_PIE_ON, RTC_PIE_OFF, RTC_IRQP_SET, RTC_IRQP_READ ... another
+ feature often accessible with an IRQ line is a periodic IRQ, issued
+ at settable frequencies (usually 2^N Hz).
In many cases, the RTC alarm can be a system wake event, used to force
Linux out of a low power sleep state (or hibernation) back to a fully
operational state. For example, a system could enter a deep power saving
state until it's time to execute some scheduled tasks.
-Note that many of these ioctls are handled by the common rtc-dev interface.
-Some common examples:
+Note that many of these ioctls need not actually be implemented by your
+driver. The common rtc-dev interface handles many of these nicely if your
+driver returns ENOIOCTLCMD. Some common examples:
* RTC_RD_TIME, RTC_SET_TIME: the read_time/set_time functions will be
called with appropriate values.
- * RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: gets or sets
- the alarm rtc_timer. May call the set_alarm driver function.
+ * RTC_ALM_SET, RTC_ALM_READ, RTC_WKALM_SET, RTC_WKALM_RD: the
+ set_alarm/read_alarm functions will be called.
- * RTC_IRQP_SET, RTC_IRQP_READ: These are emulated by the generic code.
+ * RTC_IRQP_SET, RTC_IRQP_READ: the irq_set_freq function will be called
+ to set the frequency while the framework will handle the read for you
+ since the frequency is stored in the irq_freq member of the rtc_device
+ structure. Your driver needs to initialize the irq_freq member during
+ init. Make sure you check the requested frequency is in range of your
+ hardware in the irq_set_freq function. If it isn't, return -EINVAL. If
+ you cannot actually change the frequency, do not define irq_set_freq.
- * RTC_PIE_ON, RTC_PIE_OFF: These are also emulated by the generic code.
+ * RTC_PIE_ON, RTC_PIE_OFF: the irq_set_state function will be called.
If all else fails, check out the rtc-test.c driver!
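A minimal sketch of the driver side described by the restored text: provide only the operations the hardware supports and let the rtc-dev core handle the remaining ioctls. The exact rtc_class_ops layout depends on the kernel version, and the 8192 Hz limit below is a hypothetical hardware constraint:

	#include <linux/rtc.h>

	static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
	{
		/* read the hardware registers and fill in *tm */
		return 0;
	}

	static int foo_rtc_set_time(struct device *dev, struct rtc_time *tm)
	{
		/* program the hardware from *tm */
		return 0;
	}

	static int foo_rtc_irq_set_freq(struct device *dev, int freq)
	{
		if (freq < 1 || freq > 8192)	/* hypothetical hardware limit */
			return -EINVAL;
		/* program the periodic interrupt divider */
		return 0;
	}

	static const struct rtc_class_ops foo_rtc_ops = {
		.read_time	= foo_rtc_read_time,	/* RTC_RD_TIME */
		.set_time	= foo_rtc_set_time,	/* RTC_SET_TIME */
		.irq_set_freq	= foo_rtc_irq_set_freq,	/* RTC_IRQP_SET */
		/* remaining ioctls fall through to the rtc-dev core,
		 * as described in the text above */
	};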
diff --git a/trunk/Documentation/spinlocks.txt b/trunk/Documentation/spinlocks.txt
index 2e3c64b1a6a5..178c831b907d 100644
--- a/trunk/Documentation/spinlocks.txt
+++ b/trunk/Documentation/spinlocks.txt
@@ -86,7 +86,7 @@ to change the variables it has to get an exclusive write lock.
The routines look the same as above:
- rwlock_t xxx_lock = __RW_LOCK_UNLOCKED(xxx_lock);
+ rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
unsigned long flags;
@@ -196,3 +196,25 @@ appropriate:
For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+ static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+ static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+ struct foo bar {
+ .lock = SPIN_LOCK_UNLOCKED;
+ };
+
+to:
+
+ struct foo bar {
+ .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
+ };
+
+Declaration of static rw_locks undergo a similar transformation.
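The last line above notes that static rw_locks "undergo a similar transformation" without spelling it out; a minimal sketch of the rwlock analog:

	#include <linux/spinlock.h>

	/* old style, now deprecated because it defeats lockdep:
	 *	static rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
	 */
	static DEFINE_RWLOCK(xxx_lock);

	struct foo {
		rwlock_t lock;
	};

	/* static structure member, new style */
	static struct foo bar = {
		.lock = __RW_LOCK_UNLOCKED(bar.lock),
	};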
diff --git a/trunk/Documentation/trace/ftrace-design.txt b/trunk/Documentation/trace/ftrace-design.txt
index 79fcafc7fd64..dc52bd442c92 100644
--- a/trunk/Documentation/trace/ftrace-design.txt
+++ b/trunk/Documentation/trace/ftrace-design.txt
@@ -247,13 +247,6 @@ You need very few things to get the syscalls tracing in an arch.
- Support the TIF_SYSCALL_TRACEPOINT thread flags.
- Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
in the ptrace syscalls tracing path.
-- If the system call table on this arch is more complicated than a simple array
- of addresses of the system calls, implement an arch_syscall_addr to return
- the address of a given system call.
-- If the symbol names of the system calls do not match the function names on
- this arch, define ARCH_HAS_SYSCALL_MATCH_SYM_NAME in asm/ftrace.h and
- implement arch_syscall_match_sym_name with the appropriate logic to return
- true if the function name corresponds with the symbol name.
- Tag this arch as HAVE_SYSCALL_TRACEPOINTS.
diff --git a/trunk/Documentation/trace/ftrace.txt b/trunk/Documentation/trace/ftrace.txt
index 1ebc24cf9a55..557c1edeccaf 100644
--- a/trunk/Documentation/trace/ftrace.txt
+++ b/trunk/Documentation/trace/ftrace.txt
@@ -80,11 +80,11 @@ of ftrace. Here is a list of some of the key files:
tracers listed here can be configured by
echoing their name into current_tracer.
- tracing_on:
+ tracing_enabled:
- This sets or displays whether writing to the trace
- ring buffer is enabled. Echo 0 into this file to disable
- the tracer or 1 to enable it.
+ This sets or displays whether the current_tracer
+ is activated and tracing or not. Echo 0 into this
+ file to disable the tracer or 1 to enable it.
trace:
@@ -202,6 +202,10 @@ Here is the list of current tracers that may be configured.
to draw a graph of function calls similar to C code
source.
+ "sched_switch"
+
+ Traces the context switches and wakeups between tasks.
+
"irqsoff"
Traces the areas that disable interrupts and saves
@@ -269,6 +273,39 @@ format, the function name that was traced "path_put" and the
parent function that called this function "path_walk". The
timestamp is the time at which the function was entered.
+The sched_switch tracer also includes tracing of task wakeups
+and context switches.
+
+ ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 2916:115:S
+ ksoftirqd/1-7 [01] 1453.070013: 7:115:R + 10:115:S
+ ksoftirqd/1-7 [01] 1453.070013: 7:115:R ==> 10:115:R
+ events/1-10 [01] 1453.070013: 10:115:S ==> 2916:115:R
+ kondemand/1-2916 [01] 1453.070013: 2916:115:S ==> 7:115:R
+ ksoftirqd/1-7 [01] 1453.070013: 7:115:S ==> 0:140:R
+
+Wake ups are represented by a "+" and the context switches are
+shown as "==>". The format is:
+
+ Context switches:
+
+ Previous task Next Task
+
+ <pid>:<prio>:<task-state> ==> <pid>:<prio>:<task-state>
+
+ Wake ups:
+
+ Current task Task waking up
+
+ <pid>:<prio>:<task-state> + <pid>:<prio>:<task-state>
+
+The prio is the internal kernel priority, which is the inverse
+of the priority that is usually displayed by user-space tools.
+Zero represents the highest priority (99). Prio 100 starts the
+"nice" priorities with 100 being equal to nice -20 and 139 being
+nice 19. The prio "140" is reserved for the idle task which is
+the lowest priority thread (pid 0).
+
+
Latency trace format
--------------------
@@ -454,10 +491,78 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
latencies, as described in "Latency
trace format".
- overwrite - This controls what happens when the trace buffer is
- full. If "1" (default), the oldest events are
- discarded and overwritten. If "0", then the newest
- events are discarded.
+sched_switch
+------------
+
+This tracer simply records schedule switches. Here is an example
+of how to use it.
+
+ # echo sched_switch > current_tracer
+ # echo 1 > tracing_enabled
+ # sleep 1
+ # echo 0 > tracing_enabled
+ # cat trace
+
+# tracer: sched_switch
+#
+# TASK-PID CPU# TIMESTAMP FUNCTION
+# | | | | |
+ bash-3997 [01] 240.132281: 3997:120:R + 4055:120:R
+ bash-3997 [01] 240.132284: 3997:120:R ==> 4055:120:R
+ sleep-4055 [01] 240.132371: 4055:120:S ==> 3997:120:R
+ bash-3997 [01] 240.132454: 3997:120:R + 4055:120:S
+ bash-3997 [01] 240.132457: 3997:120:R ==> 4055:120:R
+ sleep-4055 [01] 240.132460: 4055:120:D ==> 3997:120:R
+ bash-3997 [01] 240.132463: 3997:120:R + 4055:120:D
+ bash-3997 [01] 240.132465: 3997:120:R ==> 4055:120:R
+ <idle>-0 [00] 240.132589: 0:140:R + 4:115:S
+ <idle>-0 [00] 240.132591: 0:140:R ==> 4:115:R
+ ksoftirqd/0-4 [00] 240.132595: 4:115:S ==> 0:140:R
+ <idle>-0 [00] 240.132598: 0:140:R + 4:115:S
+ <idle>-0 [00] 240.132599: 0:140:R ==> 4:115:R
+ ksoftirqd/0-4 [00] 240.132603: 4:115:S ==> 0:140:R
+ sleep-4055 [01] 240.133058: 4055:120:S ==> 3997:120:R
+ [...]
+
+
+As we have discussed previously about this format, the header
+shows the name of the trace and points to the options. The
+"FUNCTION" is a misnomer since here it represents the wake ups
+and context switches.
+
+The sched_switch file only lists the wake ups (represented with
+'+') and context switches ('==>') with the previous task or
+current task first followed by the next task or task waking up.
+The format for both of these is PID:KERNEL-PRIO:TASK-STATE.
+Remember that the KERNEL-PRIO is the inverse of the actual
+priority with zero (0) being the highest priority and the nice
+values starting at 100 (nice -20). Below is a quick chart to map
+the kernel priority to user land priorities.
+
+ Kernel Space User Space
+ ===============================================================
+ 0(high) to 98(low) user RT priority 99(high) to 1(low)
+ with SCHED_RR or SCHED_FIFO
+ ---------------------------------------------------------------
+ 99 sched_priority is not used in scheduling
+ decisions(it must be specified as 0)
+ ---------------------------------------------------------------
+ 100(high) to 139(low) user nice -20(high) to 19(low)
+ ---------------------------------------------------------------
+ 140 idle task priority
+ ---------------------------------------------------------------
+
+The task states are:
+
+ R - running : wants to run, may not actually be running
+ S - sleep : process is waiting to be woken up (handles signals)
+ D - disk sleep (uninterruptible sleep) : process must be woken up
+ (ignores signals)
+ T - stopped : process suspended
+ t - traced : process is being traced (with something like gdb)
+ Z - zombie : process waiting to be cleaned up
+ X - unknown
+
ftrace_enabled
--------------
@@ -502,10 +607,10 @@ an example:
# echo irqsoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: irqsoff
#
@@ -610,10 +715,10 @@ is much like the irqsoff tracer.
# echo preemptoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: preemptoff
#
@@ -758,10 +863,10 @@ tracers.
# echo preemptirqsoff > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# ls -ltr
[...]
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: preemptirqsoff
#
@@ -921,9 +1026,9 @@ Instead of performing an 'ls', we will run 'sleep 1' under
# echo wakeup > current_tracer
# echo latency-format > trace_options
# echo 0 > tracing_max_latency
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# chrt -f 5 sleep 1
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: wakeup
#
@@ -1035,9 +1140,9 @@ ftrace_enabled is set; otherwise this tracer is a nop.
# sysctl kernel.ftrace_enabled=1
# echo function > current_tracer
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: function
#
@@ -1075,7 +1180,7 @@ int trace_fd;
[...]
int main(int argc, char *argv[]) {
[...]
- trace_fd = open(tracing_file("tracing_on"), O_WRONLY);
+ trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
[...]
if (condition_hit()) {
write(trace_fd, "0", 1);
@@ -1526,9 +1631,9 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
# echo sys_nanosleep hrtimer_interrupt \
> set_ftrace_filter
# echo function > current_tracer
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: ftrace
#
@@ -1774,9 +1879,9 @@ different. The trace is live.
# echo function > current_tracer
# cat trace_pipe > /tmp/trace.out &
[1] 4153
- # echo 1 > tracing_on
+ # echo 1 > tracing_enabled
# usleep 1
- # echo 0 > tracing_on
+ # echo 0 > tracing_enabled
# cat trace
# tracer: function
#
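
The trace_fd fragment above can be fleshed out into a self-contained sketch. This is an editorial illustration rather than part of the patch: tracing is assumed to be running already, the debugfs mount point /sys/kernel/debug and the condition_hit() placeholder are assumptions, and the program simply freezes the buffer when the interesting condition occurs.

/*
 * Illustrative sketch only (not part of this patch): a complete version
 * of the trace_fd fragment in the documentation above. Tracing is
 * assumed to be enabled already; the program writes "0" to
 * tracing_enabled the moment a problem condition is detected, so the
 * trace buffer keeps the events leading up to it. The debugfs path and
 * condition_hit() are assumptions.
 */
#include <fcntl.h>
#include <unistd.h>

#define TRACING_DIR "/sys/kernel/debug/tracing/"

static int condition_hit(void)
{
	return 1;			/* placeholder for the real check */
}

int main(void)
{
	int trace_fd = open(TRACING_DIR "tracing_enabled", O_WRONLY);

	if (trace_fd < 0)
		return 1;

	if (condition_hit())
		write(trace_fd, "0", 1);	/* stop tracing, keep the buffer */

	close(trace_fd);
	return 0;
}
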
diff --git a/trunk/Documentation/trace/kprobetrace.txt b/trunk/Documentation/trace/kprobetrace.txt
index 6d27ab8d6e9f..5f77d94598dd 100644
--- a/trunk/Documentation/trace/kprobetrace.txt
+++ b/trunk/Documentation/trace/kprobetrace.txt
@@ -42,25 +42,11 @@ Synopsis of kprobe_events
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
- (u8/u16/u32/u64/s8/s16/s32/s64), "string" and bitfield
- are supported.
+ (u8/u16/u32/u64/s8/s16/s32/s64) and string are supported.
(*) only for return probe.
(**) this is useful for fetching a field of data structures.
-Types
------
-Several types are supported for fetch-args. Kprobe tracer will access memory
-by given type. Prefix 's' and 'u' means those types are signed and unsigned
-respectively. Traced arguments are shown in decimal (signed) or hex (unsigned).
-String type is a special type, which fetches a "null-terminated" string from
-kernel space. This means it will fail and store NULL if the string container
-has been paged out.
-Bitfield is another special type, which takes 3 parameters, bit-width, bit-
-offset, and container-size (usually 32). The syntax is;
-
-	b<bit-width>@<bit-offset>/<container-size>
-
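
As a concrete illustration of the synopsis above, the sketch below registers a probe using only the fetch-arg types that remain supported after this change (the basic integer types and string). It is an editorial example, not part of the patch; the probe name, symbol, register choices and debugfs path are assumptions modeled on the examples elsewhere in kprobetrace.txt.

/*
 * Illustrative sketch only (not part of this patch): register a kprobe
 * event on do_sys_open() using basic integer fetch-arg types, then enable
 * it. Probe name, registers and the debugfs path are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACING_DIR "/sys/kernel/debug/tracing/"

static int append(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY | O_APPEND);

	if (fd < 0)
		return -1;
	if (write(fd, s, strlen(s)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(void)
{
	/* p:NAME SYMBOL ARG=FETCHARG:TYPE ... */
	append(TRACING_DIR "kprobe_events",
	       "p:myprobe do_sys_open dfd=%ax:s32 flags=%cx:u32\n");
	append(TRACING_DIR "events/kprobes/myprobe/enable", "1\n");
	return 0;
}
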
Per-Probe Event Filtering
-------------------------
diff --git a/trunk/Documentation/workqueue.txt b/trunk/Documentation/workqueue.txt
index 01c513fac40e..996a27d9b8db 100644
--- a/trunk/Documentation/workqueue.txt
+++ b/trunk/Documentation/workqueue.txt
@@ -190,9 +190,9 @@ resources, scheduled and executed.
* Long running CPU intensive workloads which can be better
managed by the system scheduler.
- WQ_FREEZABLE
+ WQ_FREEZEABLE
- A freezable wq participates in the freeze phase of the system
+ A freezeable wq participates in the freeze phase of the system
suspend operations. Work items on the wq are drained and no
new work item starts execution until thawed.
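
For context, a workqueue is made freezeable at allocation time. The sketch below is an editorial illustration, not part of the patch; the names are made up, the WQ_FREEZEABLE spelling follows this tree, and alloc_workqueue() is used as documented elsewhere in workqueue.txt.

/*
 * Illustrative sketch only (not part of this patch): a driver allocates a
 * freezeable workqueue so its work items are drained across the suspend
 * freeze phase, as described above. All names are invented.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_wq_init(void)
{
	/* Freezeable: drained on suspend, restarted on thaw. */
	example_wq = alloc_workqueue("example_wq", WQ_FREEZEABLE, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_wq_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_wq_init);
module_exit(example_wq_exit);
MODULE_LICENSE("GPL");
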
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index f1bc3dc6b369..531c5cf150ba 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -885,7 +885,7 @@ S: Supported
ARM/QUALCOMM MSM MACHINE SUPPORT
M: David Brown
-M: Daniel Walker
+M: Daniel Walker
M: Bryan Huntsman
L: linux-arm-msm@vger.kernel.org
F: arch/arm/mach-msm/
@@ -1010,15 +1010,6 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5p*/
-ARM/SAMSUNG MOBILE MACHINE SUPPORT
-M: Kyungmin Park
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-s5pv210/mach-aquila.c
-F: arch/arm/mach-s5pv210/mach-goni.c
-F: arch/arm/mach-exynos4/mach-universal_c210.c
-F: arch/arm/mach-exynos4/mach-nuri.c
-
ARM/SAMSUNG S5P SERIES FIMC SUPPORT
M: Kyungmin Park
M: Sylwester Nawrocki
@@ -1476,7 +1467,6 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh
-M: Andy Gospodarek
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
@@ -1702,13 +1692,6 @@ M: Andy Whitcroft
S: Supported
F: scripts/checkpatch.pl
-CHINESE DOCUMENTATION
-M: Harry Wei
-L: xiyoulinuxkernelgroup@googlegroups.com
-L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
-S: Maintained
-F: Documentation/zh_CN/
-
CISCO VIC ETHERNET NIC DRIVER
M: Vasanthy Kolluri
M: Roopa Prabhu
@@ -2043,7 +2026,7 @@ F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
-M: Gerrit Renker
+M: Arnaldo Carvalho de Melo
L: dccp@vger.kernel.org
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
S: Maintained
@@ -2143,7 +2126,6 @@ S: Supported
F: fs/dlm/
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
-M: Vinod Koul
M: Dan Williams
S: Supported
F: drivers/dma/
@@ -2792,15 +2774,6 @@ F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/linux/gigaset_dev.h
-GPIO SUBSYSTEM
-M: Grant Likely
-L: linux-kernel@vger.kernel.org
-S: Maintained
-T: git git://git.secretlab.ca/git/linux-2.6.git
-F: Documentation/gpio/gpio.txt
-F: drivers/gpio/
-F: include/linux/gpio*
-
GRETH 10/100/1G Ethernet MAC device driver
M: Kristoffer Glembo
L: netdev@vger.kernel.org
@@ -2890,6 +2863,7 @@ M: Guenter Roeck
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
@@ -3529,7 +3503,7 @@ F: drivers/hwmon/jc42.c
F: Documentation/hwmon/jc42
JFS FILESYSTEM
-M: Dave Kleikamp
+M: Dave Kleikamp
L: jfs-discussion@lists.sourceforge.net
W: http://jfs.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4292,7 +4266,10 @@ S: Maintained
F: net/sched/sch_netem.c
NETERION 10GbE DRIVERS (s2io/vxge)
-M: Jon Mason
+M: Ramkrishna Vepa
+M: Sivakumar Subramani
+M: Sreenivasa Honnur
+M: Jon Mason
L: netdev@vger.kernel.org
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -4614,7 +4591,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely
-L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
+L: devicetree-discuss@lists.ozlabs.org
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
@@ -5178,7 +5155,6 @@ F: drivers/char/random.c
RAPIDIO SUBSYSTEM
M: Matt Porter
-M: Alexandre Bounine
S: Maintained
F: drivers/rapidio/
@@ -5281,7 +5257,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
-M: Herton Ronaldo Krzesinski
+M: Herton Ronaldo Krzesinski
M: Hin-Tak Leung
M: Larry Finger
L: linux-wireless@vger.kernel.org
@@ -6119,7 +6095,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
-M: Herton Ronaldo Krzesinski
+M: Herton Ronaldo Krzesinski
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
diff --git a/trunk/Makefile b/trunk/Makefile
index d6592b63c8cb..c9c8c8fd2591 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
-EXTRAVERSION =
+EXTRAVERSION = -rc4
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig
index cc31bec2e316..47f63d480141 100644
--- a/trunk/arch/alpha/Kconfig
+++ b/trunk/arch/alpha/Kconfig
@@ -11,7 +11,6 @@ config ALPHA
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
- select GENERIC_HARDIRQS_NO_DEPRECATED
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/trunk/arch/alpha/include/asm/futex.h b/trunk/arch/alpha/include/asm/futex.h
index e8a761aee088..945de222ab91 100644
--- a/trunk/arch/alpha/include/asm/futex.h
+++ b/trunk/arch/alpha/include/asm/futex.h
@@ -29,7 +29,7 @@
: "r" (uaddr), "r"(oparg) \
: "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -81,23 +81,21 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0, cmp;
- u32 prev;
+ int prev, cmp;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
__asm__ __volatile__ (
__ASM_SMP_MB
- "1: ldl_l %1,0(%3)\n"
- " cmpeq %1,%4,%2\n"
- " beq %2,3f\n"
- " mov %5,%2\n"
- "2: stl_c %2,0(%3)\n"
- " beq %2,4f\n"
+ "1: ldl_l %0,0(%2)\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,3f\n"
+ " mov %4,%1\n"
+ "2: stl_c %1,0(%2)\n"
+ " beq %1,4f\n"
"3: .subsection 2\n"
"4: br 1b\n"
" .previous\n"
@@ -107,12 +105,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .long 2b-.\n"
" lda $31,3b-2b(%0)\n"
" .previous\n"
- : "+r"(ret), "=&r"(prev), "=&r"(cmp)
+ : "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)oldval), "r"(newval)
: "memory");
- *uval = prev;
- return ret;
+ return prev;
}
#endif /* __KERNEL__ */
diff --git a/trunk/arch/alpha/include/asm/rwsem.h b/trunk/arch/alpha/include/asm/rwsem.h
index a83bbea62c67..1570c0b54336 100644
--- a/trunk/arch/alpha/include/asm/rwsem.h
+++ b/trunk/arch/alpha/include/asm/rwsem.h
@@ -13,13 +13,44 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ long count;
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
static inline void __down_read(struct rw_semaphore *sem)
{
@@ -219,5 +250,10 @@ static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
#endif
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */
diff --git a/trunk/arch/alpha/kernel/irq.c b/trunk/arch/alpha/kernel/irq.c
index a19d60082299..9ab234f48dd8 100644
--- a/trunk/arch/alpha/kernel/irq.c
+++ b/trunk/arch/alpha/kernel/irq.c
@@ -44,16 +44,11 @@ static char irq_user_affinity[NR_IRQS];
int irq_select_affinity(unsigned int irq)
{
- struct irq_data *data = irq_get_irq_data(irq);
- struct irq_chip *chip;
+ struct irq_desc *desc = irq_to_desc[irq];
static int last_cpu;
int cpu = last_cpu + 1;
- if (!data)
- return 1;
- chip = irq_data_get_irq_chip(data);
-
- if (!chip->irq_set_affinity || irq_user_affinity[irq])
+ if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu) ||
@@ -61,8 +56,8 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- cpumask_copy(data->affinity, cpumask_of(cpu));
- chip->irq_set_affinity(data, cpumask_of(cpu), false);
+ cpumask_copy(desc->affinity, cpumask_of(cpu));
+ get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
return 0;
}
#endif /* CONFIG_SMP */
diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c
index 411ca11d0a18..2d0679b60939 100644
--- a/trunk/arch/alpha/kernel/irq_alpha.c
+++ b/trunk/arch/alpha/kernel/irq_alpha.c
@@ -228,9 +228,14 @@ struct irqaction timer_irqaction = {
void __init
init_rtc_irq(void)
{
- set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
- handle_simple_irq, "RTC");
- setup_irq(RTC_IRQ, &timer_irqaction);
+ struct irq_desc *desc = irq_to_desc(RTC_IRQ);
+
+ if (desc) {
+ desc->status |= IRQ_DISABLED;
+ set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+ handle_simple_irq, "RTC");
+ setup_irq(RTC_IRQ, &timer_irqaction);
+ }
}
/* Dummy irqactions. */
diff --git a/trunk/arch/alpha/kernel/irq_i8259.c b/trunk/arch/alpha/kernel/irq_i8259.c
index c7cc9813e45f..956ea0ed1694 100644
--- a/trunk/arch/alpha/kernel/irq_i8259.c
+++ b/trunk/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
}
inline void
-i8259a_enable_irq(struct irq_data *d)
+i8259a_enable_irq(unsigned int irq)
{
spin_lock(&i8259_irq_lock);
- i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
+ i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
spin_unlock(&i8259_irq_lock);
}
@@ -47,18 +47,16 @@ __i8259a_disable_irq(unsigned int irq)
}
void
-i8259a_disable_irq(struct irq_data *d)
+i8259a_disable_irq(unsigned int irq)
{
spin_lock(&i8259_irq_lock);
- __i8259a_disable_irq(d->irq);
+ __i8259a_disable_irq(irq);
spin_unlock(&i8259_irq_lock);
}
void
-i8259a_mask_and_ack_irq(struct irq_data *d)
+i8259a_mask_and_ack_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
-
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
@@ -73,9 +71,9 @@ i8259a_mask_and_ack_irq(struct irq_data *d)
struct irq_chip i8259a_irq_type = {
.name = "XT-PIC",
- .irq_unmask = i8259a_enable_irq,
- .irq_mask = i8259a_disable_irq,
- .irq_mask_ack = i8259a_mask_and_ack_irq,
+ .unmask = i8259a_enable_irq,
+ .mask = i8259a_disable_irq,
+ .mask_ack = i8259a_mask_and_ack_irq,
};
void __init
diff --git a/trunk/arch/alpha/kernel/irq_impl.h b/trunk/arch/alpha/kernel/irq_impl.h
index d507a234b05d..b63ccd7386f1 100644
--- a/trunk/arch/alpha/kernel/irq_impl.h
+++ b/trunk/arch/alpha/kernel/irq_impl.h
@@ -31,9 +31,11 @@ extern void init_rtc_irq(void);
extern void common_init_isa_dma(void);
-extern void i8259a_enable_irq(struct irq_data *d);
-extern void i8259a_disable_irq(struct irq_data *d);
-extern void i8259a_mask_and_ack_irq(struct irq_data *d);
+extern void i8259a_enable_irq(unsigned int);
+extern void i8259a_disable_irq(unsigned int);
+extern void i8259a_mask_and_ack_irq(unsigned int);
+extern unsigned int i8259a_startup_irq(unsigned int);
+extern void i8259a_end_irq(unsigned int);
extern struct irq_chip i8259a_irq_type;
extern void init_i8259a_irqs(void);
diff --git a/trunk/arch/alpha/kernel/irq_pyxis.c b/trunk/arch/alpha/kernel/irq_pyxis.c
index b30227fa7f5f..2863458c853e 100644
--- a/trunk/arch/alpha/kernel/irq_pyxis.c
+++ b/trunk/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
}
static inline void
-pyxis_enable_irq(struct irq_data *d)
+pyxis_enable_irq(unsigned int irq)
{
- pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
+ pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
}
static void
-pyxis_disable_irq(struct irq_data *d)
+pyxis_disable_irq(unsigned int irq)
{
- pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
+ pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
}
static void
-pyxis_mask_and_ack_irq(struct irq_data *d)
+pyxis_mask_and_ack_irq(unsigned int irq)
{
- unsigned long bit = 1UL << (d->irq - 16);
+ unsigned long bit = 1UL << (irq - 16);
unsigned long mask = cached_irq_mask &= ~bit;
/* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(struct irq_data *d)
static struct irq_chip pyxis_irq_type = {
.name = "PYXIS",
- .irq_mask_ack = pyxis_mask_and_ack_irq,
- .irq_mask = pyxis_disable_irq,
- .irq_unmask = pyxis_enable_irq,
+ .mask_ack = pyxis_mask_and_ack_irq,
+ .mask = pyxis_disable_irq,
+ .unmask = pyxis_enable_irq,
};
void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
if ((ignore_mask >> i) & 1)
continue;
set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
+ irq_to_desc(i)->status |= IRQ_LEVEL;
}
setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/trunk/arch/alpha/kernel/irq_srm.c b/trunk/arch/alpha/kernel/irq_srm.c
index 82a47bba41c4..0e57e828b413 100644
--- a/trunk/arch/alpha/kernel/irq_srm.c
+++ b/trunk/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
DEFINE_SPINLOCK(srm_irq_lock);
static inline void
-srm_enable_irq(struct irq_data *d)
+srm_enable_irq(unsigned int irq)
{
spin_lock(&srm_irq_lock);
- cserve_ena(d->irq - 16);
+ cserve_ena(irq - 16);
spin_unlock(&srm_irq_lock);
}
static void
-srm_disable_irq(struct irq_data *d)
+srm_disable_irq(unsigned int irq)
{
spin_lock(&srm_irq_lock);
- cserve_dis(d->irq - 16);
+ cserve_dis(irq - 16);
spin_unlock(&srm_irq_lock);
}
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct irq_chip srm_irq_type = {
.name = "SRM",
- .irq_unmask = srm_enable_irq,
- .irq_mask = srm_disable_irq,
- .irq_mask_ack = srm_disable_irq,
+ .unmask = srm_enable_irq,
+ .mask = srm_disable_irq,
+ .mask_ack = srm_disable_irq,
};
void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
+ irq_to_desc(i)->status |= IRQ_LEVEL;
}
}
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 376f22130791..fe698b5045e9 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -230,24 +230,44 @@ linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_st
return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}
-SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
- struct osf_statfs __user *, buffer, unsigned long, bufsiz)
+static int
+do_osf_statfs(struct path *path, struct osf_statfs __user *buffer,
+ unsigned long bufsiz)
{
struct kstatfs linux_stat;
- int error = user_statfs(pathname, &linux_stat);
+ int error = vfs_statfs(path, &linux_stat);
if (!error)
error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
return error;
}
+SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
+ struct osf_statfs __user *, buffer, unsigned long, bufsiz)
+{
+ struct path path;
+ int retval;
+
+ retval = user_path(pathname, &path);
+ if (!retval) {
+ retval = do_osf_statfs(&path, buffer, bufsiz);
+ path_put(&path);
+ }
+ return retval;
+}
+
SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
- struct kstatfs linux_stat;
- int error = fd_statfs(fd, &linux_stat);
- if (!error)
- error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
- return error;
+ struct file *file;
+ int retval;
+
+ retval = -EBADF;
+ file = fget(fd);
+ if (file) {
+ retval = do_osf_statfs(&file->f_path, buffer, bufsiz);
+ fput(file);
+ }
+ return retval;
}
/*
diff --git a/trunk/arch/alpha/kernel/sys_alcor.c b/trunk/arch/alpha/kernel/sys_alcor.c
index 88d95e872f55..7bef61768236 100644
--- a/trunk/arch/alpha/kernel/sys_alcor.c
+++ b/trunk/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
}
static inline void
-alcor_enable_irq(struct irq_data *d)
+alcor_enable_irq(unsigned int irq)
{
- alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
+ alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
}
static void
-alcor_disable_irq(struct irq_data *d)
+alcor_disable_irq(unsigned int irq)
{
- alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
+ alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
}
static void
-alcor_mask_and_ack_irq(struct irq_data *d)
+alcor_mask_and_ack_irq(unsigned int irq)
{
- alcor_disable_irq(d);
+ alcor_disable_irq(irq);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
- *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
+ *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 0; mb();
}
static void
-alcor_isa_mask_and_ack_irq(struct irq_data *d)
+alcor_isa_mask_and_ack_irq(unsigned int irq)
{
- i8259a_mask_and_ack_irq(d);
+ i8259a_mask_and_ack_irq(irq);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(struct irq_data *d)
static struct irq_chip alcor_irq_type = {
.name = "ALCOR",
- .irq_unmask = alcor_enable_irq,
- .irq_mask = alcor_disable_irq,
- .irq_mask_ack = alcor_mask_and_ack_irq,
+ .unmask = alcor_enable_irq,
+ .mask = alcor_disable_irq,
+ .mask_ack = alcor_mask_and_ack_irq,
};
static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
if (i >= 16+20 && i <= 16+30)
continue;
set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
+ irq_to_desc(i)->status |= IRQ_LEVEL;
}
- i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
+ i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
init_i8259a_irqs();
common_init_isa_dma();
diff --git a/trunk/arch/alpha/kernel/sys_cabriolet.c b/trunk/arch/alpha/kernel/sys_cabriolet.c
index 57eb6307bc27..b0c916493aea 100644
--- a/trunk/arch/alpha/kernel/sys_cabriolet.c
+++ b/trunk/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
-cabriolet_enable_irq(struct irq_data *d)
+cabriolet_enable_irq(unsigned int irq)
{
- cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
+ cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
}
static void
-cabriolet_disable_irq(struct irq_data *d)
+cabriolet_disable_irq(unsigned int irq)
{
- cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
+ cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
}
static struct irq_chip cabriolet_irq_type = {
.name = "CABRIOLET",
- .irq_unmask = cabriolet_enable_irq,
- .irq_mask = cabriolet_disable_irq,
- .irq_mask_ack = cabriolet_disable_irq,
+ .unmask = cabriolet_enable_irq,
+ .mask = cabriolet_disable_irq,
+ .mask_ack = cabriolet_disable_irq,
};
static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
for (i = 16; i < 35; ++i) {
set_irq_chip_and_handler(i, &cabriolet_irq_type,
handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
+ irq_to_desc(i)->status |= IRQ_LEVEL;
}
}
diff --git a/trunk/arch/alpha/kernel/sys_dp264.c b/trunk/arch/alpha/kernel/sys_dp264.c
index 481df4ecb651..edad5f759ccd 100644
--- a/trunk/arch/alpha/kernel/sys_dp264.c
+++ b/trunk/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
}
static void
-dp264_enable_irq(struct irq_data *d)
+dp264_enable_irq(unsigned int irq)
{
spin_lock(&dp264_irq_lock);
- cached_irq_mask |= 1UL << d->irq;
+ cached_irq_mask |= 1UL << irq;
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
-dp264_disable_irq(struct irq_data *d)
+dp264_disable_irq(unsigned int irq)
{
spin_lock(&dp264_irq_lock);
- cached_irq_mask &= ~(1UL << d->irq);
+ cached_irq_mask &= ~(1UL << irq);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
-clipper_enable_irq(struct irq_data *d)
+clipper_enable_irq(unsigned int irq)
{
spin_lock(&dp264_irq_lock);
- cached_irq_mask |= 1UL << (d->irq - 16);
+ cached_irq_mask |= 1UL << (irq - 16);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
-clipper_disable_irq(struct irq_data *d)
+clipper_disable_irq(unsigned int irq)
{
spin_lock(&dp264_irq_lock);
- cached_irq_mask &= ~(1UL << (d->irq - 16));
+ cached_irq_mask &= ~(1UL << (irq - 16));
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
@@ -149,11 +149,10 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
-dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
- bool force)
-{
+dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
+{
spin_lock(&dp264_irq_lock);
- cpu_set_irq_affinity(d->irq, *affinity);
+ cpu_set_irq_affinity(irq, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@@ -161,11 +160,10 @@ dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
}
static int
-clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
- bool force)
-{
+clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
+{
spin_lock(&dp264_irq_lock);
- cpu_set_irq_affinity(d->irq - 16, *affinity);
+ cpu_set_irq_affinity(irq - 16, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@@ -173,19 +171,19 @@ clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
}
static struct irq_chip dp264_irq_type = {
- .name = "DP264",
- .irq_unmask = dp264_enable_irq,
- .irq_mask = dp264_disable_irq,
- .irq_mask_ack = dp264_disable_irq,
- .irq_set_affinity = dp264_set_affinity,
+ .name = "DP264",
+ .unmask = dp264_enable_irq,
+ .mask = dp264_disable_irq,
+ .mask_ack = dp264_disable_irq,
+ .set_affinity = dp264_set_affinity,
};
static struct irq_chip clipper_irq_type = {
- .name = "CLIPPER",
- .irq_unmask = clipper_enable_irq,
- .irq_mask = clipper_disable_irq,
- .irq_mask_ack = clipper_disable_irq,
- .irq_set_affinity = clipper_set_affinity,
+ .name = "CLIPPER",
+ .unmask = clipper_enable_irq,
+ .mask = clipper_disable_irq,
+ .mask_ack = clipper_disable_irq,
+ .set_affinity = clipper_set_affinity,
};
static void
@@ -270,8 +268,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
}
diff --git a/trunk/arch/alpha/kernel/sys_eb64p.c b/trunk/arch/alpha/kernel/sys_eb64p.c
index 402e908ffb3e..ae5f29d127b0 100644
--- a/trunk/arch/alpha/kernel/sys_eb64p.c
+++ b/trunk/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
-eb64p_enable_irq(struct irq_data *d)
+eb64p_enable_irq(unsigned int irq)
{
- eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
+ eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
}
static void
-eb64p_disable_irq(struct irq_data *d)
+eb64p_disable_irq(unsigned int irq)
{
- eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
+ eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
}
static struct irq_chip eb64p_irq_type = {
.name = "EB64P",
- .irq_unmask = eb64p_enable_irq,
- .irq_mask = eb64p_disable_irq,
- .irq_mask_ack = eb64p_disable_irq,
+ .unmask = eb64p_enable_irq,
+ .mask = eb64p_disable_irq,
+ .mask_ack = eb64p_disable_irq,
};
static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 32; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
- }
+ }
common_init_isa_dma();
setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/trunk/arch/alpha/kernel/sys_eiger.c b/trunk/arch/alpha/kernel/sys_eiger.c
index 0b44a54c1522..1121bc5c6c6c 100644
--- a/trunk/arch/alpha/kernel/sys_eiger.c
+++ b/trunk/arch/alpha/kernel/sys_eiger.c
@@ -51,18 +51,16 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
-eiger_enable_irq(struct irq_data *d)
+eiger_enable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
eiger_update_irq_hw(irq, mask);
}
static void
-eiger_disable_irq(struct irq_data *d)
+eiger_disable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
eiger_update_irq_hw(irq, mask);
@@ -70,9 +68,9 @@ eiger_disable_irq(struct irq_data *d)
static struct irq_chip eiger_irq_type = {
.name = "EIGER",
- .irq_unmask = eiger_enable_irq,
- .irq_mask = eiger_disable_irq,
- .irq_mask_ack = eiger_disable_irq,
+ .unmask = eiger_enable_irq,
+ .mask = eiger_disable_irq,
+ .mask_ack = eiger_disable_irq,
};
static void
@@ -138,8 +136,8 @@ eiger_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 128; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
}
diff --git a/trunk/arch/alpha/kernel/sys_jensen.c b/trunk/arch/alpha/kernel/sys_jensen.c
index 00341b75c8b2..34f55e03d331 100644
--- a/trunk/arch/alpha/kernel/sys_jensen.c
+++ b/trunk/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
*/
static void
-jensen_local_enable(struct irq_data *d)
+jensen_local_enable(unsigned int irq)
{
/* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_enable_irq(d);
+ if (irq == 7)
+ i8259a_enable_irq(1);
}
static void
-jensen_local_disable(struct irq_data *d)
+jensen_local_disable(unsigned int irq)
{
/* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_disable_irq(d);
+ if (irq == 7)
+ i8259a_disable_irq(1);
}
static void
-jensen_local_mask_ack(struct irq_data *d)
+jensen_local_mask_ack(unsigned int irq)
{
/* the parport is really hw IRQ 1, silly Jensen. */
- if (d->irq == 7)
- i8259a_mask_and_ack_irq(d);
+ if (irq == 7)
+ i8259a_mask_and_ack_irq(1);
}
static struct irq_chip jensen_local_irq_type = {
.name = "LOCAL",
- .irq_unmask = jensen_local_enable,
- .irq_mask = jensen_local_disable,
- .irq_mask_ack = jensen_local_mask_ack,
+ .unmask = jensen_local_enable,
+ .mask = jensen_local_disable,
+ .mask_ack = jensen_local_mask_ack,
};
static void
diff --git a/trunk/arch/alpha/kernel/sys_marvel.c b/trunk/arch/alpha/kernel/sys_marvel.c
index e61910734e41..2bfc9f1b1ddc 100644
--- a/trunk/arch/alpha/kernel/sys_marvel.c
+++ b/trunk/arch/alpha/kernel/sys_marvel.c
@@ -104,10 +104,9 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
}
static void
-io7_enable_irq(struct irq_data *d)
+io7_enable_irq(unsigned int irq)
{
volatile unsigned long *ctl;
- unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@@ -116,7 +115,7 @@ io7_enable_irq(struct irq_data *d)
__func__, irq);
return;
}
-
+
spin_lock(&io7->irq_lock);
*ctl |= 1UL << 24;
mb();
@@ -125,10 +124,9 @@ io7_enable_irq(struct irq_data *d)
}
static void
-io7_disable_irq(struct irq_data *d)
+io7_disable_irq(unsigned int irq)
{
volatile unsigned long *ctl;
- unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@@ -137,7 +135,7 @@ io7_disable_irq(struct irq_data *d)
__func__, irq);
return;
}
-
+
spin_lock(&io7->irq_lock);
*ctl &= ~(1UL << 24);
mb();
@@ -146,29 +144,35 @@ io7_disable_irq(struct irq_data *d)
}
static void
-marvel_irq_noop(struct irq_data *d)
-{
- return;
+marvel_irq_noop(unsigned int irq)
+{
+ return;
+}
+
+static unsigned int
+marvel_irq_noop_return(unsigned int irq)
+{
+ return 0;
}
static struct irq_chip marvel_legacy_irq_type = {
.name = "LEGACY",
- .irq_mask = marvel_irq_noop,
- .irq_unmask = marvel_irq_noop,
+ .mask = marvel_irq_noop,
+ .unmask = marvel_irq_noop,
};
static struct irq_chip io7_lsi_irq_type = {
.name = "LSI",
- .irq_unmask = io7_enable_irq,
- .irq_mask = io7_disable_irq,
- .irq_mask_ack = io7_disable_irq,
+ .unmask = io7_enable_irq,
+ .mask = io7_disable_irq,
+ .mask_ack = io7_disable_irq,
};
static struct irq_chip io7_msi_irq_type = {
.name = "MSI",
- .irq_unmask = io7_enable_irq,
- .irq_mask = io7_disable_irq,
- .irq_ack = marvel_irq_noop,
+ .unmask = io7_enable_irq,
+ .mask = io7_disable_irq,
+ .ack = marvel_irq_noop,
};
static void
@@ -276,8 +280,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
+ irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
/* Disable the implemented irqs in hardware. */
@@ -290,8 +294,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
+ irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
for (i = 0; i < 16; ++i)
diff --git a/trunk/arch/alpha/kernel/sys_mikasa.c b/trunk/arch/alpha/kernel/sys_mikasa.c
index cf7f43dd3147..bcc1639e8efb 100644
--- a/trunk/arch/alpha/kernel/sys_mikasa.c
+++ b/trunk/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
}
static inline void
-mikasa_enable_irq(struct irq_data *d)
+mikasa_enable_irq(unsigned int irq)
{
- mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
+ mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
}
static void
-mikasa_disable_irq(struct irq_data *d)
+mikasa_disable_irq(unsigned int irq)
{
- mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
+ mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
}
static struct irq_chip mikasa_irq_type = {
.name = "MIKASA",
- .irq_unmask = mikasa_enable_irq,
- .irq_mask = mikasa_disable_irq,
- .irq_mask_ack = mikasa_disable_irq,
+ .unmask = mikasa_enable_irq,
+ .mask = mikasa_disable_irq,
+ .mask_ack = mikasa_disable_irq,
};
static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
mikasa_update_irq_hw(0);
for (i = 16; i < 32; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
diff --git a/trunk/arch/alpha/kernel/sys_noritake.c b/trunk/arch/alpha/kernel/sys_noritake.c
index 92bc188e94a9..e88f4ae1260e 100644
--- a/trunk/arch/alpha/kernel/sys_noritake.c
+++ b/trunk/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
}
static void
-noritake_enable_irq(struct irq_data *d)
+noritake_enable_irq(unsigned int irq)
{
- noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
+ noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
}
static void
-noritake_disable_irq(struct irq_data *d)
+noritake_disable_irq(unsigned int irq)
{
- noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
+ noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
}
static struct irq_chip noritake_irq_type = {
.name = "NORITAKE",
- .irq_unmask = noritake_enable_irq,
- .irq_mask = noritake_disable_irq,
- .irq_mask_ack = noritake_disable_irq,
+ .unmask = noritake_enable_irq,
+ .mask = noritake_disable_irq,
+ .mask_ack = noritake_disable_irq,
};
static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
outw(0, 0x54c);
for (i = 16; i < 48; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
diff --git a/trunk/arch/alpha/kernel/sys_rawhide.c b/trunk/arch/alpha/kernel/sys_rawhide.c
index 936d4140ed5f..6a51364dd1cc 100644
--- a/trunk/arch/alpha/kernel/sys_rawhide.c
+++ b/trunk/arch/alpha/kernel/sys_rawhide.c
@@ -56,10 +56,9 @@ rawhide_update_irq_hw(int hose, int mask)
(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
static inline void
-rawhide_enable_irq(struct irq_data *d)
+rawhide_enable_irq(unsigned int irq)
{
unsigned int mask, hose;
- unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@@ -77,10 +76,9 @@ rawhide_enable_irq(struct irq_data *d)
}
static void
-rawhide_disable_irq(struct irq_data *d)
+rawhide_disable_irq(unsigned int irq)
{
unsigned int mask, hose;
- unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@@ -98,10 +96,9 @@ rawhide_disable_irq(struct irq_data *d)
}
static void
-rawhide_mask_and_ack_irq(struct irq_data *d)
+rawhide_mask_and_ack_irq(unsigned int irq)
{
unsigned int mask, mask1, hose;
- unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@@ -126,9 +123,9 @@ rawhide_mask_and_ack_irq(struct irq_data *d)
static struct irq_chip rawhide_irq_type = {
.name = "RAWHIDE",
- .irq_unmask = rawhide_enable_irq,
- .irq_mask = rawhide_disable_irq,
- .irq_mask_ack = rawhide_mask_and_ack_irq,
+ .unmask = rawhide_enable_irq,
+ .mask = rawhide_disable_irq,
+ .mask_ack = rawhide_mask_and_ack_irq,
};
static void
@@ -180,8 +177,8 @@ rawhide_init_irq(void)
}
for (i = 16; i < 128; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
diff --git a/trunk/arch/alpha/kernel/sys_rx164.c b/trunk/arch/alpha/kernel/sys_rx164.c
index cea22a62913b..89e7e37ec84c 100644
--- a/trunk/arch/alpha/kernel/sys_rx164.c
+++ b/trunk/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
}
static inline void
-rx164_enable_irq(struct irq_data *d)
+rx164_enable_irq(unsigned int irq)
{
- rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
+ rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
}
static void
-rx164_disable_irq(struct irq_data *d)
+rx164_disable_irq(unsigned int irq)
{
- rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
+ rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
}
static struct irq_chip rx164_irq_type = {
.name = "RX164",
- .irq_unmask = rx164_enable_irq,
- .irq_mask = rx164_disable_irq,
- .irq_mask_ack = rx164_disable_irq,
+ .unmask = rx164_enable_irq,
+ .mask = rx164_disable_irq,
+ .mask_ack = rx164_disable_irq,
};
static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
diff --git a/trunk/arch/alpha/kernel/sys_sable.c b/trunk/arch/alpha/kernel/sys_sable.c
index a349538aabc9..5c4423d1b06c 100644
--- a/trunk/arch/alpha/kernel/sys_sable.c
+++ b/trunk/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
/* GENERIC irq routines */
static inline void
-sable_lynx_enable_irq(struct irq_data *d)
+sable_lynx_enable_irq(unsigned int irq)
{
unsigned long bit, mask;
- bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(struct irq_data *d)
}
static void
-sable_lynx_disable_irq(struct irq_data *d)
+sable_lynx_disable_irq(unsigned int irq)
{
unsigned long bit, mask;
- bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(struct irq_data *d)
}
static void
-sable_lynx_mask_and_ack_irq(struct irq_data *d)
+sable_lynx_mask_and_ack_irq(unsigned int irq)
{
unsigned long bit, mask;
- bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
+ bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(struct irq_data *d)
static struct irq_chip sable_lynx_irq_type = {
.name = "SABLE/LYNX",
- .irq_unmask = sable_lynx_enable_irq,
- .irq_mask = sable_lynx_disable_irq,
- .irq_mask_ack = sable_lynx_mask_and_ack_irq,
+ .unmask = sable_lynx_enable_irq,
+ .mask = sable_lynx_disable_irq,
+ .mask_ack = sable_lynx_mask_and_ack_irq,
};
static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
long i;
for (i = 0; i < nr_of_irqs; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &sable_lynx_irq_type,
handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
diff --git a/trunk/arch/alpha/kernel/sys_takara.c b/trunk/arch/alpha/kernel/sys_takara.c
index 42a5331f13c4..f8a1e8a862fb 100644
--- a/trunk/arch/alpha/kernel/sys_takara.c
+++ b/trunk/arch/alpha/kernel/sys_takara.c
@@ -45,18 +45,16 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
-takara_enable_irq(struct irq_data *d)
+takara_enable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
takara_update_irq_hw(irq, mask);
}
static void
-takara_disable_irq(struct irq_data *d)
+takara_disable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
takara_update_irq_hw(irq, mask);
@@ -64,9 +62,9 @@ takara_disable_irq(struct irq_data *d)
static struct irq_chip takara_irq_type = {
.name = "TAKARA",
- .irq_unmask = takara_enable_irq,
- .irq_mask = takara_disable_irq,
- .irq_mask_ack = takara_disable_irq,
+ .unmask = takara_enable_irq,
+ .mask = takara_disable_irq,
+ .mask_ack = takara_disable_irq,
};
static void
@@ -138,8 +136,8 @@ takara_init_irq(void)
takara_update_irq_hw(i, -1);
for (i = 16; i < 128; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c
index 8c13a0c77830..e02494bf5ef3 100644
--- a/trunk/arch/alpha/kernel/sys_titan.c
+++ b/trunk/arch/alpha/kernel/sys_titan.c
@@ -112,9 +112,8 @@ titan_update_irq_hw(unsigned long mask)
}
static inline void
-titan_enable_irq(struct irq_data *d)
+titan_enable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask |= 1UL << (irq - 16);
titan_update_irq_hw(titan_cached_irq_mask);
@@ -122,9 +121,8 @@ titan_enable_irq(struct irq_data *d)
}
static inline void
-titan_disable_irq(struct irq_data *d)
+titan_disable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask &= ~(1UL << (irq - 16));
titan_update_irq_hw(titan_cached_irq_mask);
@@ -146,10 +144,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
-titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
- bool force)
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
- unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cpu_set_irq_affinity(irq - 16, *affinity);
titan_update_irq_hw(titan_cached_irq_mask);
@@ -179,17 +175,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
+ irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
- irq_set_status_flags(i, IRQ_LEVEL);
}
}
static struct irq_chip titan_irq_type = {
- .name = "TITAN",
- .irq_unmask = titan_enable_irq,
- .irq_mask = titan_disable_irq,
- .irq_mask_ack = titan_disable_irq,
- .irq_set_affinity = titan_set_irq_affinity,
+ .name = "TITAN",
+ .unmask = titan_enable_irq,
+ .mask = titan_disable_irq,
+ .mask_ack = titan_disable_irq,
+ .set_affinity = titan_set_irq_affinity,
};
static irqreturn_t
diff --git a/trunk/arch/alpha/kernel/sys_wildfire.c b/trunk/arch/alpha/kernel/sys_wildfire.c
index ca60a387ef0a..eec52594d410 100644
--- a/trunk/arch/alpha/kernel/sys_wildfire.c
+++ b/trunk/arch/alpha/kernel/sys_wildfire.c
@@ -104,12 +104,10 @@ wildfire_init_irq_hw(void)
}
static void
-wildfire_enable_irq(struct irq_data *d)
+wildfire_enable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
-
if (irq < 16)
- i8259a_enable_irq(d);
+ i8259a_enable_irq(irq);
spin_lock(&wildfire_irq_lock);
set_bit(irq, &cached_irq_mask);
@@ -118,12 +116,10 @@ wildfire_enable_irq(struct irq_data *d)
}
static void
-wildfire_disable_irq(struct irq_data *d)
+wildfire_disable_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
-
if (irq < 16)
- i8259a_disable_irq(d);
+ i8259a_disable_irq(irq);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@@ -132,12 +128,10 @@ wildfire_disable_irq(struct irq_data *d)
}
static void
-wildfire_mask_and_ack_irq(struct irq_data *d)
+wildfire_mask_and_ack_irq(unsigned int irq)
{
- unsigned int irq = d->irq;
-
if (irq < 16)
- i8259a_mask_and_ack_irq(d);
+ i8259a_mask_and_ack_irq(irq);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@@ -147,9 +141,9 @@ wildfire_mask_and_ack_irq(struct irq_data *d)
static struct irq_chip wildfire_irq_type = {
.name = "WILDFIRE",
- .irq_unmask = wildfire_enable_irq,
- .irq_mask = wildfire_disable_irq,
- .irq_mask_ack = wildfire_mask_and_ack_irq,
+ .unmask = wildfire_enable_irq,
+ .mask = wildfire_disable_irq,
+ .mask_ack = wildfire_mask_and_ack_irq,
};
static void __init
@@ -183,21 +177,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
for (i = 0; i < 16; ++i) {
if (i == 2)
continue;
+ irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
- irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
+ irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
handle_level_irq);
- irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
for (i = 40; i < 64; ++i) {
+ irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
- irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
- setup_irq(32+irq_bias, &isa_enable);
+ setup_irq(32+irq_bias, &isa_enable);
}
static void __init
diff --git a/trunk/arch/alpha/kernel/time.c b/trunk/arch/alpha/kernel/time.c
index a58e84f1a63b..c1f3e7cb82a4 100644
--- a/trunk/arch/alpha/kernel/time.c
+++ b/trunk/arch/alpha/kernel/time.c
@@ -159,7 +159,7 @@ void read_persistent_clock(struct timespec *ts)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
irqreturn_t timer_interrupt(int irq, void *dev)
{
@@ -172,6 +172,8 @@ irqreturn_t timer_interrupt(int irq, void *dev)
profile_tick(CPU_PROFILING);
#endif
+ write_seqlock(&xtime_lock);
+
/*
* Calculate how many ticks have passed since the last update,
* including any previous partial leftover. Save any resulting
@@ -185,7 +187,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
nticks = delta >> FIX_SHIFT;
if (nticks)
- xtime_update(nticks);
+ do_timer(nticks);
+
+ write_sequnlock(&xtime_lock);
if (test_irq_work_pending()) {
clear_irq_work_pending();
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 166efa2a19cd..5cff165b7eb0 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1177,31 +1177,6 @@ config ARM_ERRATA_743622
visible impact on the overall performance or power consumption of the
processor.
-config ARM_ERRATA_751472
- bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for the 751472 Cortex-A9 (prior
- to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
- completion of a following broadcasted operation if the second
- operation is received by a CPU before the ICIALLUIS has completed,
- potentially leading to corrupted entries in the cache or TLB.
-
-config ARM_ERRATA_753970
- bool "ARM errata: cache sync operation may be faulty"
- depends on CACHE_PL310
- help
- This option enables the workaround for the 753970 PL310 (r3p0) erratum.
-
- Under some condition the effect of cache sync operation on
- the store buffer still remains when the operation completes.
- This means that the store buffer is always asked to drain and
- this prevents it from merging any further writes. The workaround
- is to replace the normal offset of cache sync operation (0x730)
- by another offset targeting an unmapped PL310 register 0x740.
- This has the same effect as the cache sync operation: store buffer
- drain and waiting for all buffers empty.
-
endmenu
source "arch/arm/common/Kconfig"
@@ -1416,7 +1391,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
+ depends on AEABI && EXPERIMENTAL
default y
help
This option preserves the old syscall interface along with the
diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile
index 6f7b29294c80..c22c1adfedd6 100644
--- a/trunk/arch/arm/Makefile
+++ b/trunk/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
-OBJCOPYFLAGS :=-O binary -R .comment -S
+OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/trunk/arch/arm/boot/compressed/.gitignore b/trunk/arch/arm/boot/compressed/.gitignore
index c6028967d336..ab204db594d3 100644
--- a/trunk/arch/arm/boot/compressed/.gitignore
+++ b/trunk/arch/arm/boot/compressed/.gitignore
@@ -1,7 +1,3 @@
font.c
-lib1funcs.S
-piggy.gzip
-piggy.lzo
-piggy.lzma
-vmlinux
+piggy.gz
vmlinux.lds
diff --git a/trunk/arch/arm/common/Kconfig b/trunk/arch/arm/common/Kconfig
index ea5ee4d067f3..778655f0257a 100644
--- a/trunk/arch/arm/common/Kconfig
+++ b/trunk/arch/arm/common/Kconfig
@@ -6,8 +6,6 @@ config ARM_VIC
config ARM_VIC_NR
int
- default 4 if ARCH_S5PV210
- default 3 if ARCH_S5P6442 || ARCH_S5PC100
default 2
depends on ARM_VIC
help
diff --git a/trunk/arch/arm/include/asm/futex.h b/trunk/arch/arm/include/asm/futex.h
index 199a6b6de7f4..b33fe7065b38 100644
--- a/trunk/arch/arm/include/asm/futex.h
+++ b/trunk/arch/arm/include/asm/futex.h
@@ -35,7 +35,7 @@
: "cc", "memory")
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */
@@ -88,35 +88,36 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0;
- u32 val;
+ int val;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
+ pagefault_disable(); /* implies preempt_disable() */
+
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " T(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
+ "1: " T(ldr) " %0, [%3]\n"
+ " teq %0, %1\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: " T(streq) " %3, [%4]\n"
+ "2: " T(streq) " %2, [%3]\n"
"3:\n"
" .pushsection __ex_table,\"a\"\n"
" .align 3\n"
" .long 1b, 4f, 2b, 4f\n"
" .popsection\n"
" .pushsection .fixup,\"ax\"\n"
- "4: mov %0, %5\n"
+ "4: mov %0, %4\n"
" b 3b\n"
" .popsection"
- : "+r" (ret), "=&r" (val)
+ : "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
- *uval = val;
- return ret;
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ return val;
}
#endif /* !SMP */
diff --git a/trunk/arch/arm/include/asm/hardware/cache-l2x0.h b/trunk/arch/arm/include/asm/hardware/cache-l2x0.h
index 16bd48031583..5aeec1e1735c 100644
--- a/trunk/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/trunk/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,7 +36,6 @@
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
-#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0
diff --git a/trunk/arch/arm/include/asm/hardware/sp810.h b/trunk/arch/arm/include/asm/hardware/sp810.h
index e0d1c0cfa548..721847dc68ab 100644
--- a/trunk/arch/arm/include/asm/hardware/sp810.h
+++ b/trunk/arch/arm/include/asm/hardware/sp810.h
@@ -58,9 +58,6 @@
static inline void sysctl_soft_reset(void __iomem *base)
{
- /* switch to slow mode */
- writel(0x2, base + SCCTRL);
-
/* writing any value to SCSYSSTAT reg will reset system */
writel(0, base + SCSYSSTAT);
}
diff --git a/trunk/arch/arm/include/asm/mach/arch.h b/trunk/arch/arm/include/asm/mach/arch.h
index bf13b814c1b8..3a0893a76a3b 100644
--- a/trunk/arch/arm/include/asm/mach/arch.h
+++ b/trunk/arch/arm/include/asm/mach/arch.h
@@ -15,6 +15,10 @@ struct meminfo;
struct sys_timer;
struct machine_desc {
+ /*
+ * Note! The first two elements are used
+ * by assembler code in head.S, head-common.S
+ */
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */
diff --git a/trunk/arch/arm/include/asm/pgalloc.h b/trunk/arch/arm/include/asm/pgalloc.h
index 22de005f159c..9763be04f77e 100644
--- a/trunk/arch/arm/include/asm/pgalloc.h
+++ b/trunk/arch/arm/include/asm/pgalloc.h
@@ -10,8 +10,6 @@
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
-#include <linux/pagemap.h>
-
#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
diff --git a/trunk/arch/arm/include/asm/tlb.h b/trunk/arch/arm/include/asm/tlb.h
index 82dfe5d0c41e..f41a6f57cd12 100644
--- a/trunk/arch/arm/include/asm/tlb.h
+++ b/trunk/arch/arm/include/asm/tlb.h
@@ -18,34 +18,16 @@
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
-
-#define tlb_flush(tlb) ((void) tlb)
-
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
-#include <linux/swap.h>
#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb) 0
-#define FREE_PTE_NR 500
-#else
-#define tlb_fast_mode(tlb) 1
-#define FREE_PTE_NR 0
-#endif
/*
* TLB handling. This allows us to remove pages from the page
@@ -54,58 +36,12 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
- struct vm_area_struct *vma;
unsigned long range_start;
unsigned long range_end;
- unsigned int nr;
- struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-/*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL. Additionally, page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- * tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- if (tlb->fullmm || !tlb->vma)
- flush_tlb_mm(tlb->mm);
- else if (tlb->range_end > 0) {
- flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush(tlb);
- if (!tlb_fast_mode(tlb)) {
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- }
-}
-
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
@@ -113,8 +49,6 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
- tlb->vma = NULL;
- tlb->nr = 0;
return tlb;
}
@@ -122,7 +56,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- tlb_flush_mmu(tlb);
+ if (tlb->fullmm)
+ flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
check_pgt_cache();
@@ -136,7 +71,12 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
- tlb_add_flush(tlb, addr);
+ if (!tlb->fullmm) {
+ if (addr < tlb->range_start)
+ tlb->range_start = addr;
+ if (addr + PAGE_SIZE > tlb->range_end)
+ tlb->range_end = addr + PAGE_SIZE;
+ }
}
/*
@@ -149,7 +89,6 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
- tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
@@ -158,30 +97,12 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (tlb_fast_mode(tlb)) {
- free_page_and_swap_cache(page);
- } else {
- tlb->pages[tlb->nr++] = page;
- if (tlb->nr >= FREE_PTE_NR)
- tlb_flush_mmu(tlb);
- }
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long addr)
-{
- pgtable_page_dtor(pte);
- tlb_add_flush(tlb, addr);
- tlb_remove_page(tlb, pte);
+ if (!tlb->fullmm && tlb->range_end > 0)
+ flush_tlb_range(vma, tlb->range_start, tlb->range_end);
}
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
+#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/trunk/arch/arm/include/asm/tlbflush.h b/trunk/arch/arm/include/asm/tlbflush.h
index d2005de383b8..ce7378ea15a2 100644
--- a/trunk/arch/arm/include/asm/tlbflush.h
+++ b/trunk/arch/arm/include/asm/tlbflush.h
@@ -10,7 +10,12 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
-#ifdef CONFIG_MMU
+
+#ifndef CONFIG_MMU
+
+#define tlb_flush(tlb) ((void) tlb)
+
+#else /* CONFIG_MMU */
#include
diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S
index f06ff9feb0db..c0225da3fb21 100644
--- a/trunk/arch/arm/kernel/head.S
+++ b/trunk/arch/arm/kernel/head.S
@@ -391,7 +391,6 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
- __INIT
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
@@ -416,7 +415,18 @@ __fixup_smp_on_up:
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
- b __do_fixup_smp_on_up
+2: cmp r4, r5
+ movhs pc, lr
+ ldmia r4!, {r0, r6}
+ ARM( str r6, [r0, r3] )
+ THUMB( add r0, r0, r3 )
+#ifdef __ARMEB__
+ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
+#endif
+ THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( strh r6, [r0] )
+ b 2b
ENDPROC(__fixup_smp)
.align
@@ -430,31 +440,7 @@ smp_on_up:
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
-#endif
- .text
-__do_fixup_smp_on_up:
- cmp r4, r5
- movhs pc, lr
- ldmia r4!, {r0, r6}
- ARM( str r6, [r0, r3] )
- THUMB( add r0, r0, r3 )
-#ifdef __ARMEB__
- THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
- THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
- THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
- THUMB( strh r6, [r0] )
- b __do_fixup_smp_on_up
-ENDPROC(__do_fixup_smp_on_up)
-
-ENTRY(fixup_smp)
- stmfd sp!, {r4 - r6, lr}
- mov r4, r0
- add r5, r0, r1
- mov r3, #0
- bl __do_fixup_smp_on_up
- ldmfd sp!, {r4 - r6, pc}
-ENDPROC(fixup_smp)
#include "head-common.S"
diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c
index 44b84fe6e1b0..c9f3f0467570 100644
--- a/trunk/arch/arm/kernel/hw_breakpoint.c
+++ b/trunk/arch/arm/kernel/hw_breakpoint.c
@@ -137,10 +137,11 @@ static u8 get_debug_arch(void)
u32 didr;
/* Do we implement the extended CPUID interface? */
- if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
- "CPUID feature registers not supported. "
- "Assuming v6 debug is present.\n"))
+ if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+ pr_warning("CPUID feature registers not supported. "
+ "Assuming v6 debug is present.\n");
return ARM_DEBUG_ARCH_V6;
+ }
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
@@ -151,12 +152,6 @@ u8 arch_get_debug_arch(void)
return debug_arch;
}
-static int debug_arch_supported(void)
-{
- u8 arch = get_debug_arch();
- return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
-}
-
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
@@ -273,9 +268,6 @@ static int enable_monitor_mode(void)
int hw_breakpoint_slots(int type)
{
- if (!debug_arch_supported())
- return 0;
-
/*
* We can be called early, so don't rely on
* our static variables being initialised.
@@ -836,32 +828,19 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
* One-time initialisation.
*/
-static void reset_ctrl_regs(void *info)
+static void reset_ctrl_regs(void *unused)
{
- int i, cpu = smp_processor_id();
- u32 dbg_power;
- cpumask_t *cpumask = info;
+ int i;
/*
* v7 debug contains save and restore registers so that debug state
- * can be maintained across low-power modes without leaving the debug
- * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
- * the debug registers out of reset, so we must unlock the OS Lock
- * Access Register to avoid taking undefined instruction exceptions
- * later on.
+ * can be maintained across low-power modes without leaving
+ * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
+ * we can write to the debug registers out of reset, so we must
+ * unlock the OS Lock Access Register to avoid taking undefined
+ * instruction exceptions later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
- /*
- * Ensure sticky power-down is clear (i.e. debug logic is
- * powered up).
- */
- asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
- if ((dbg_power & 0x1) == 0) {
- pr_warning("CPU %d debug is powered down!\n", cpu);
- cpumask_or(cpumask, cpumask, cpumask_of(cpu));
- return;
- }
-
/*
* Unconditionally clear the lock by writing a value
* other than 0xC5ACCE55 to the access register.
@@ -900,11 +879,10 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
- cpumask_t cpumask = { CPU_BITS_NONE };
debug_arch = get_debug_arch();
- if (!debug_arch_supported()) {
+ if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
return 0;
}
@@ -921,24 +899,18 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("%d breakpoint(s) reserved for watchpoint "
"single-step.\n", core_num_reserved_brps);
- /*
- * Reset the breakpoint resources. We assume that a halting
- * debugger will leave the world in a nice state for us.
- */
- on_each_cpu(reset_ctrl_regs, &cpumask, 1);
- if (!cpumask_empty(&cpumask)) {
- core_num_brps = 0;
- core_num_reserved_brps = 0;
- core_num_wrps = 0;
- return 0;
- }
-
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
- max_watchpoint_len = 4;
pr_warning("halting debug mode enabled. Assuming maximum "
- "watchpoint size of %u bytes.", max_watchpoint_len);
+ "watchpoint size of 4 bytes.");
} else {
+ /*
+ * Reset the breakpoint resources. We assume that a halting
+ * debugger will leave the world in a nice state for us.
+ */
+ smp_call_function(reset_ctrl_regs, NULL, 1);
+ reset_ctrl_regs(NULL);
+
/* Work out the maximum supported watchpoint length. */
max_watchpoint_len = get_max_wp_len();
pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/trunk/arch/arm/kernel/kprobes-decode.c b/trunk/arch/arm/kernel/kprobes-decode.c
index 8f6ed43861f1..2c1f0050c9c4 100644
--- a/trunk/arch/arm/kernel/kprobes-decode.c
+++ b/trunk/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return space_cccc_1100_010x(insn, asi);
- } else if ((insn & 0x0e000000) == 0x0c000000) {
+ } else if ((insn & 0x0e000000) == 0x0c400000) {
return space_cccc_110x(insn, asi);
diff --git a/trunk/arch/arm/kernel/module.c b/trunk/arch/arm/kernel/module.c
index 6d4105e6872f..2cfe8161b478 100644
--- a/trunk/arch/arm/kernel/module.c
+++ b/trunk/arch/arm/kernel/module.c
@@ -22,7 +22,6 @@
#include
#include
-#include
#include
#ifdef CONFIG_XIP_KERNEL
@@ -269,28 +268,12 @@ struct mod_unwind_map {
const Elf_Shdr *txt_sec;
};
-static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
- const Elf_Shdr *sechdrs, const char *name)
-{
- const Elf_Shdr *s, *se;
- const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
- for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
- if (strcmp(name, secstrs + s->sh_name) == 0)
- return s;
-
- return NULL;
-}
-
-extern void fixup_smp(const void *, unsigned long);
-
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
- const Elf_Shdr * __maybe_unused s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
+ const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX];
int i;
@@ -332,9 +315,6 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
- s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
- if (s && !is_smp())
- fixup_smp((void *)s->sh_addr, s->sh_size);
return 0;
}
diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c
index d150ad1ccb5d..5efa2647a2fb 100644
--- a/trunk/arch/arm/kernel/perf_event.c
+++ b/trunk/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
- if (tail + 1 >= buftail.fp)
+ if (tail >= buftail.fp)
return NULL;
return buftail.fp - 1;
diff --git a/trunk/arch/arm/kernel/pmu.c b/trunk/arch/arm/kernel/pmu.c
index 2c79eec19262..b8af96ea62e6 100644
--- a/trunk/arch/arm/kernel/pmu.c
+++ b/trunk/arch/arm/kernel/pmu.c
@@ -97,34 +97,28 @@ set_irq_affinity(int irq,
irq, cpu);
return err;
#else
- return -EINVAL;
+ return 0;
#endif
}
static int
init_cpu_pmu(void)
{
- int i, irqs, err = 0;
+ int i, err = 0;
struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
- if (!pdev)
- return -ENODEV;
-
- irqs = pdev->num_resources;
-
- /*
- * If we have a single PMU interrupt that we can't shift, assume that
- * we're running on a uniprocessor machine and continue.
- */
- if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
- return 0;
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
- for (i = 0; i < irqs; ++i) {
+ for (i = 0; i < pdev->num_resources; ++i) {
err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
+out:
return err;
}
diff --git a/trunk/arch/arm/kernel/ptrace.c b/trunk/arch/arm/kernel/ptrace.c
index b13e70f63d71..19c6816db61e 100644
--- a/trunk/arch/arm/kernel/ptrace.c
+++ b/trunk/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
- if (num & 0x1)
- reg = bp->attr.bp_addr;
- else
+ if (idx & 0x1)
reg = encode_ctrl_reg(arch_ctrl);
+ else
+ reg = bp->attr.bp_addr;
}
put:
diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c
index 5ea4fb718b97..420b8d6485d6 100644
--- a/trunk/arch/arm/kernel/setup.c
+++ b/trunk/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4"
: "=r" (mmfr0));
- if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
- (mmfr0 & 0x000000f0) >= 0x00000030)
+ if ((mmfr0 & 0x0000000f) == 0x00000003 ||
+ (mmfr0 & 0x000000f0) == 0x00000030)
cpu_arch = CPU_ARCH_ARMv7;
else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
(mmfr0 & 0x000000f0) == 0x00000020)
diff --git a/trunk/arch/arm/kernel/signal.c b/trunk/arch/arm/kernel/signal.c
index abaf8445ce25..907d5a620bca 100644
--- a/trunk/arch/arm/kernel/signal.c
+++ b/trunk/arch/arm/kernel/signal.c
@@ -474,9 +474,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
- unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
-
- cpsr |= PSR_ENDSTATE;
+ unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
diff --git a/trunk/arch/arm/kernel/time.c b/trunk/arch/arm/kernel/time.c
index 1ff46cabc7ef..3d76bf233734 100644
--- a/trunk/arch/arm/kernel/time.c
+++ b/trunk/arch/arm/kernel/time.c
@@ -107,7 +107,9 @@ void timer_tick(void)
{
profile_tick(CPU_PROFILING);
do_leds();
- xtime_update(1);
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+ write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
diff --git a/trunk/arch/arm/kernel/vmlinux.lds.S b/trunk/arch/arm/kernel/vmlinux.lds.S
index 61462790757f..86b66f3f2031 100644
--- a/trunk/arch/arm/kernel/vmlinux.lds.S
+++ b/trunk/arch/arm/kernel/vmlinux.lds.S
@@ -21,12 +21,6 @@
#define ARM_CPU_KEEP(x)
#endif
-#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
-#define ARM_EXIT_KEEP(x) x
-#else
-#define ARM_EXIT_KEEP(x)
-#endif
-
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -49,7 +43,6 @@ SECTIONS
_sinittext = .;
HEAD_TEXT
INIT_TEXT
- ARM_EXIT_KEEP(EXIT_TEXT)
_einittext = .;
ARM_CPU_DISCARD(PROC_INFO)
__arch_info_begin = .;
@@ -74,7 +67,6 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
- ARM_EXIT_KEEP(EXIT_DATA)
#endif
}
@@ -170,7 +162,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
- ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
@@ -256,8 +247,6 @@ SECTIONS
}
#endif
- NOTES
-
BSS_SECTION(0, 0, 0)
_end = .;
diff --git a/trunk/arch/arm/mach-clps711x/include/mach/time.h b/trunk/arch/arm/mach-clps711x/include/mach/time.h
index 61fef9129c6a..8fe283ccd1f3 100644
--- a/trunk/arch/arm/mach-clps711x/include/mach/time.h
+++ b/trunk/arch/arm/mach-clps711x/include/mach/time.h
@@ -30,7 +30,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
do_leds();
- xtime_update(1);
+ do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(regs));
#endif
diff --git a/trunk/arch/arm/mach-davinci/cpufreq.c b/trunk/arch/arm/mach-davinci/cpufreq.c
index 4a68c2b1ec11..343de73161fa 100644
--- a/trunk/arch/arm/mach-davinci/cpufreq.c
+++ b/trunk/arch/arm/mach-davinci/cpufreq.c
@@ -132,7 +132,7 @@ static int davinci_target(struct cpufreq_policy *policy,
return ret;
}
-static int davinci_cpu_init(struct cpufreq_policy *policy)
+static int __init davinci_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
diff --git a/trunk/arch/arm/mach-davinci/devices-da8xx.c b/trunk/arch/arm/mach-davinci/devices-da8xx.c
index beda8a4133a0..9eec63070e0c 100644
--- a/trunk/arch/arm/mach-davinci/devices-da8xx.c
+++ b/trunk/arch/arm/mach-davinci/devices-da8xx.c
@@ -480,15 +480,8 @@ static struct platform_device da850_mcasp_device = {
.resource = da850_mcasp_resources,
};
-struct platform_device davinci_pcm_device = {
- .name = "davinci-pcm-audio",
- .id = -1,
-};
-
void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
{
- platform_device_register(&davinci_pcm_device);
-
/* DA830/OMAP-L137 has 3 instances of McASP */
if (cpu_is_davinci_da830() && id == 1) {
da830_mcasp1_device.dev.platform_data = pdata;
diff --git a/trunk/arch/arm/mach-davinci/gpio-tnetv107x.c b/trunk/arch/arm/mach-davinci/gpio-tnetv107x.c
index 3fa3e2867e19..d10298620e2c 100644
--- a/trunk/arch/arm/mach-davinci/gpio-tnetv107x.c
+++ b/trunk/arch/arm/mach-davinci/gpio-tnetv107x.c
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
- gpio_reg_set_bit(regs->enable, gpio);
+ gpio_reg_set_bit(&regs->enable, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
- gpio_reg_clear_bit(regs->enable, gpio);
+ gpio_reg_clear_bit(&regs->enable, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
}
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
- gpio_reg_set_bit(regs->direction, gpio);
+ gpio_reg_set_bit(&regs->direction, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
spin_lock_irqsave(&ctlr->lock, flags);
if (value)
- gpio_reg_set_bit(regs->data_out, gpio);
+ gpio_reg_set_bit(&regs->data_out, gpio);
else
- gpio_reg_clear_bit(regs->data_out, gpio);
+ gpio_reg_clear_bit(&regs->data_out, gpio);
- gpio_reg_clear_bit(regs->direction, gpio);
+ gpio_reg_clear_bit(&regs->direction, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned gpio = chip->base + offset;
int ret;
- ret = gpio_reg_get_bit(regs->data_in, gpio);
+ ret = gpio_reg_get_bit(&regs->data_in, gpio);
return ret ? 1 : 0;
}
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
spin_lock_irqsave(&ctlr->lock, flags);
if (value)
- gpio_reg_set_bit(regs->data_out, gpio);
+ gpio_reg_set_bit(&regs->data_out, gpio);
else
- gpio_reg_clear_bit(regs->data_out, gpio);
+ gpio_reg_clear_bit(&regs->data_out, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
}
diff --git a/trunk/arch/arm/mach-davinci/include/mach/clkdev.h b/trunk/arch/arm/mach-davinci/include/mach/clkdev.h
index 14a504887189..730c49d1ebd8 100644
--- a/trunk/arch/arm/mach-davinci/include/mach/clkdev.h
+++ b/trunk/arch/arm/mach-davinci/include/mach/clkdev.h
@@ -1,8 +1,6 @@
#ifndef __MACH_CLKDEV_H
#define __MACH_CLKDEV_H
-struct clk;
-
static inline int __clk_get(struct clk *clk)
{
return 1;
diff --git a/trunk/arch/arm/mach-omap2/clkt_dpll.c b/trunk/arch/arm/mach-omap2/clkt_dpll.c
index acb7ae5b0a25..337392c3f549 100644
--- a/trunk/arch/arm/mach-omap2/clkt_dpll.c
+++ b/trunk/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
- fint = clk->parent->rate / n;
+ fint = clk->parent->rate / (n + 1);
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/trunk/arch/arm/mach-omap2/mailbox.c b/trunk/arch/arm/mach-omap2/mailbox.c
index 24b88504df0f..394413dc7deb 100644
--- a/trunk/arch/arm/mach-omap2/mailbox.c
+++ b/trunk/arch/arm/mach-omap2/mailbox.c
@@ -193,12 +193,10 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
omap_mbox_type_t irq)
{
struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- if (!cpu_is_omap44xx())
- bit = mbox_read_reg(p->irqdisable) & ~bit;
-
- mbox_write_reg(bit, p->irqdisable);
+ u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+ l = mbox_read_reg(p->irqdisable);
+ l &= ~bit;
+ mbox_write_reg(l, p->irqdisable);
}
static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -336,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
-struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)
diff --git a/trunk/arch/arm/mach-omap2/mux.c b/trunk/arch/arm/mach-omap2/mux.c
index 6c84659cf846..98148b6c36e9 100644
--- a/trunk/arch/arm/mach-omap2/mux.c
+++ b/trunk/arch/arm/mach-omap2/mux.c
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
- (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
+ (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}
diff --git a/trunk/arch/arm/mach-omap2/pm-debug.c b/trunk/arch/arm/mach-omap2/pm-debug.c
index a5a83b358ddd..125f56591fb5 100644
--- a/trunk/arch/arm/mach-omap2/pm-debug.c
+++ b/trunk/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
- (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
+ (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
&enable_off_mode, &pm_dbg_option_fops);
- (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
+ (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
&sleep_while_idle, &pm_dbg_option_fops);
- (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
+ (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
- S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
+ S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;
diff --git a/trunk/arch/arm/mach-omap2/prcm_mpu44xx.h b/trunk/arch/arm/mach-omap2/prcm_mpu44xx.h
index 3300ff6e3cfe..729a644ce852 100644
--- a/trunk/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/trunk/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
/*
diff --git a/trunk/arch/arm/mach-omap2/smartreflex.c b/trunk/arch/arm/mach-omap2/smartreflex.c
index 1a777e34d0c2..c37e823266d3 100644
--- a/trunk/arch/arm/mach-omap2/smartreflex.c
+++ b/trunk/arch/arm/mach-omap2/smartreflex.c
@@ -282,7 +282,6 @@ static int sr_late_init(struct omap_sr *sr_info)
dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
"interrupt handler. Smartreflex will"
"not function as desired\n", __func__);
- kfree(name);
kfree(sr_info);
return ret;
}
@@ -880,7 +879,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
ret = sr_late_init(sr_info);
if (ret) {
pr_warning("%s: Error in SR late init\n", __func__);
- goto err_release_region;
+ return ret;
}
}
@@ -891,20 +890,17 @@ static int __init omap_sr_probe(struct platform_device *pdev)
* not try to create rest of the debugfs entries.
*/
vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
- if (!vdd_dbg_dir) {
- ret = -EINVAL;
- goto err_release_region;
- }
+ if (!vdd_dbg_dir)
+ return -EINVAL;
dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
if (IS_ERR(dbg_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
__func__);
- ret = PTR_ERR(dbg_dir);
- goto err_release_region;
+ return PTR_ERR(dbg_dir);
}
- (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
+ (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
(void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
&sr_info->err_weight);
@@ -917,8 +913,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
if (IS_ERR(nvalue_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
"for n-values\n", __func__);
- ret = PTR_ERR(nvalue_dir);
- goto err_release_region;
+ return PTR_ERR(nvalue_dir);
}
omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -927,16 +922,24 @@ static int __init omap_sr_probe(struct platform_device *pdev)
" corresponding vdd vdd_%s. Cannot create debugfs"
"entries for n-values\n",
__func__, sr_info->voltdm->name);
- ret = -ENODATA;
- goto err_release_region;
+ return -ENODATA;
}
for (i = 0; i < sr_info->nvalue_count; i++) {
- char name[NVALUE_NAME_LEN + 1];
+ char *name;
+ char volt_name[32];
+
+ name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
+ if (!name) {
+ dev_err(&pdev->dev, "%s: Unable to allocate memory"
+ " for n-value directory name\n", __func__);
+ return -ENOMEM;
+ }
- snprintf(name, sizeof(name), "volt_%d",
- volt_data[i].volt_nominal);
- (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ strcpy(name, "volt_");
+ sprintf(volt_name, "%d", volt_data[i].volt_nominal);
+ strcat(name, volt_name);
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
}
diff --git a/trunk/arch/arm/mach-omap2/timer-gp.c b/trunk/arch/arm/mach-omap2/timer-gp.c
index 0fc550e7e482..7b7c2683ae7b 100644
--- a/trunk/arch/arm/mach-omap2/timer-gp.c
+++ b/trunk/arch/arm/mach-omap2/timer-gp.c
@@ -39,7 +39,6 @@
#include
#include
#include
-#include
#include "timer-gp.h"
@@ -191,7 +190,6 @@ static void __init omap2_gp_clocksource_init(void)
/*
* clocksource
*/
-static DEFINE_CLOCK_DATA(cd);
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
@@ -206,15 +204,6 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void notrace dmtimer_update_sched_clock(void)
-{
- u32 cyc;
-
- cyc = omap_dm_timer_read_counter(gpt_clocksource);
-
- update_sched_clock(&cd, cyc, (u32)~0);
-}
-
/* Setup free-running counter for clocksource */
static void __init omap2_gp_clocksource_init(void)
{
@@ -235,8 +224,6 @@ static void __init omap2_gp_clocksource_init(void)
omap_dm_timer_set_load_start(gpt, 1, 0);
- init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
-
if (clocksource_register_hz(&clocksource_gpt, tick_rate))
printk(err2, clocksource_gpt.name);
}
diff --git a/trunk/arch/arm/mach-pxa/colibri-evalboard.c b/trunk/arch/arm/mach-pxa/colibri-evalboard.c
index 28f667e52ef9..6b2c800a1133 100644
--- a/trunk/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/trunk/arch/arm/mach-pxa/colibri-evalboard.c
@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void)
GPIO0_COLIBRI_PXA270_SD_DETECT;
if (machine_is_colibri300()) /* PXA300 Colibri */
colibri_mci_platform_data.gpio_card_detect =
- GPIO13_COLIBRI_PXA300_SD_DETECT;
+ GPIO39_COLIBRI_PXA300_SD_DETECT;
else /* PXA320 Colibri */
colibri_mci_platform_data.gpio_card_detect =
GPIO28_COLIBRI_PXA320_SD_DETECT;
diff --git a/trunk/arch/arm/mach-pxa/colibri-pxa300.c b/trunk/arch/arm/mach-pxa/colibri-pxa300.c
index 66dd81cbc8a0..fddb16d07eb0 100644
--- a/trunk/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/trunk/arch/arm/mach-pxa/colibri-pxa300.c
@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
GPIO4_MMC1_DAT1,
GPIO5_MMC1_DAT2,
GPIO6_MMC1_DAT3,
- GPIO13_GPIO, /* GPIO13_COLIBRI_PXA300_SD_DETECT */
+ GPIO39_GPIO, /* SD detect */
/* UHC */
GPIO0_2_USBH_PEN,
diff --git a/trunk/arch/arm/mach-pxa/include/mach/colibri.h b/trunk/arch/arm/mach-pxa/include/mach/colibri.h
index cb4236e98a0f..388a96f1ef93 100644
--- a/trunk/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/trunk/arch/arm/mach-pxa/include/mach/colibri.h
@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {}
#define GPIO113_COLIBRI_PXA270_TS_IRQ 113
/* GPIO definitions for Colibri PXA300/310 */
-#define GPIO13_COLIBRI_PXA300_SD_DETECT 13
+#define GPIO39_COLIBRI_PXA300_SD_DETECT 39
/* GPIO definitions for Colibri PXA320 */
#define GPIO28_COLIBRI_PXA320_SD_DETECT 28
diff --git a/trunk/arch/arm/mach-pxa/palm27x.c b/trunk/arch/arm/mach-pxa/palm27x.c
index 35572c427fa8..405b92a29793 100644
--- a/trunk/arch/arm/mach-pxa/palm27x.c
+++ b/trunk/arch/arm/mach-pxa/palm27x.c
@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = {
.pwm_id = 0,
.max_brightness = 0xfe,
.dft_brightness = 0x7e,
- .pwm_period_ns = 3500 * 1024,
+ .pwm_period_ns = 3500,
.init = palm27x_backlight_init,
.notify = palm27x_backlight_notify,
.exit = palm27x_backlight_exit,
diff --git a/trunk/arch/arm/mach-pxa/pm.c b/trunk/arch/arm/mach-pxa/pm.c
index 1807c9abdde0..978e1b289544 100644
--- a/trunk/arch/arm/mach-pxa/pm.c
+++ b/trunk/arch/arm/mach-pxa/pm.c
@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state)
#endif
/* skip registers saving for standby */
- if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
+ if (state != PM_SUSPEND_STANDBY) {
pxa_cpu_pm_fns->save(sleep_save);
/* before sleeping, calculate and save a checksum */
for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state)
pxa_cpu_pm_fns->enter(state);
cpu_init();
- if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
+ if (state != PM_SUSPEND_STANDBY) {
/* after sleeping, validate the checksum */
for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
checksum += sleep_save[i];
diff --git a/trunk/arch/arm/mach-pxa/pxa25x.c b/trunk/arch/arm/mach-pxa/pxa25x.c
index b166b1d845d7..fbc5b775f895 100644
--- a/trunk/arch/arm/mach-pxa/pxa25x.c
+++ b/trunk/arch/arm/mach-pxa/pxa25x.c
@@ -347,7 +347,6 @@ static struct platform_device *pxa25x_devices[] __initdata = {
&pxa25x_device_assp,
&pxa25x_device_pwm0,
&pxa25x_device_pwm1,
- &pxa_device_asoc_platform,
};
static struct sys_device pxa25x_sysdev[] = {
diff --git a/trunk/arch/arm/mach-pxa/tosa-bt.c b/trunk/arch/arm/mach-pxa/tosa-bt.c
index b9b1e5c2b290..c31e601eb49c 100644
--- a/trunk/arch/arm/mach-pxa/tosa-bt.c
+++ b/trunk/arch/arm/mach-pxa/tosa-bt.c
@@ -81,6 +81,8 @@ static int tosa_bt_probe(struct platform_device *dev)
goto err_rfk_alloc;
}
+ rfkill_set_led_trigger_name(rfk, "tosa-bt");
+
rc = rfkill_register(rfk);
if (rc)
goto err_rfkill;
diff --git a/trunk/arch/arm/mach-pxa/tosa.c b/trunk/arch/arm/mach-pxa/tosa.c
index f2582ec300d9..af152e70cfcf 100644
--- a/trunk/arch/arm/mach-pxa/tosa.c
+++ b/trunk/arch/arm/mach-pxa/tosa.c
@@ -875,11 +875,6 @@ static struct platform_device sharpsl_rom_device = {
.dev.platform_data = &sharpsl_rom_data,
};
-static struct platform_device wm9712_device = {
- .name = "wm9712-codec",
- .id = -1,
-};
-
static struct platform_device *devices[] __initdata = {
&tosascoop_device,
&tosascoop_jc_device,
@@ -890,7 +885,6 @@ static struct platform_device *devices[] __initdata = {
&tosaled_device,
&tosa_bt_device,
&sharpsl_rom_device,
- &wm9712_device,
};
static void tosa_poweroff(void)
diff --git a/trunk/arch/arm/mach-s3c2440/Kconfig b/trunk/arch/arm/mach-s3c2440/Kconfig
index 50825a3f91cc..a0cb2581894f 100644
--- a/trunk/arch/arm/mach-s3c2440/Kconfig
+++ b/trunk/arch/arm/mach-s3c2440/Kconfig
@@ -99,7 +99,6 @@ config MACH_NEO1973_GTA02
select POWER_SUPPLY
select MACH_NEO1973
select S3C2410_PWM
- select S3C_DEV_USB_HOST
help
Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
diff --git a/trunk/arch/arm/mach-s3c2440/include/mach/gta02.h b/trunk/arch/arm/mach-s3c2440/include/mach/gta02.h
index 3a56a229cac6..953331d8d56a 100644
--- a/trunk/arch/arm/mach-s3c2440/include/mach/gta02.h
+++ b/trunk/arch/arm/mach-s3c2440/include/mach/gta02.h
@@ -44,19 +44,19 @@
#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
-#define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2)
-#define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4)
-#define GTA02_GPIO_3D_RESET S3C2410_GPJ(5)
-#define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7)
-#define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8)
-#define GTA02_GPIO_KEEPACT S3C2410_GPJ(8)
-#define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10)
-#define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2
+#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4
+#define GTA02_GPIO_3D_RESET S3C2440_GPJ5
+#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7
+#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8
+#define GTA02_GPIO_KEEPACT S3C2440_GPJ8
+#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10
+#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */
#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
#define GTA02_IRQ_MODEM IRQ_EINT1
diff --git a/trunk/arch/arm/mach-s3c64xx/clock.c b/trunk/arch/arm/mach-s3c64xx/clock.c
index fdfc4d5e37a1..dd3782064508 100644
--- a/trunk/arch/arm/mach-s3c64xx/clock.c
+++ b/trunk/arch/arm/mach-s3c64xx/clock.c
@@ -150,12 +150,6 @@ static struct clk init_clocks_off[] = {
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_IIC,
- }, {
- .name = "i2c",
- .id = 1,
- .parent = &clk_p,
- .enable = s3c64xx_pclk_ctrl,
- .ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
}, {
.name = "iis",
.id = 0,
diff --git a/trunk/arch/arm/mach-s3c64xx/dma.c b/trunk/arch/arm/mach-s3c64xx/dma.c
index c35585cf8c4f..135db1b41252 100644
--- a/trunk/arch/arm/mach-s3c64xx/dma.c
+++ b/trunk/arch/arm/mach-s3c64xx/dma.c
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
regptr = regs + PL080_Cx_BASE(0);
- for (ch = 0; ch < 8; ch++, chptr++) {
- pr_debug("%s: registering DMA %d (%p)\n",
- __func__, chno + ch, regptr);
+ for (ch = 0; ch < 8; ch++, chno++, chptr++) {
+ printk(KERN_INFO "%s: registering DMA %d (%p)\n",
+ __func__, chno, regptr);
chptr->bit = 1 << ch;
- chptr->number = chno + ch;
+ chptr->number = chno;
chptr->dmac = dmac;
chptr->regs = regptr;
regptr += PL080_Cx_STRIDE;
@@ -704,8 +704,7 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
/* for the moment, permanently enable the controller */
writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
- printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
- irq, regs, chno, chno+8);
+ printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
return 0;
diff --git a/trunk/arch/arm/mach-s3c64xx/gpiolib.c b/trunk/arch/arm/mach-s3c64xx/gpiolib.c
index 92b09085caaa..fd99a82e82c4 100644
--- a/trunk/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/trunk/arch/arm/mach-s3c64xx/gpiolib.c
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
.get_pull = s3c_gpio_getpull_updown,
};
-static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
{
return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
}
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
},
};
-static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
{
return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
}
diff --git a/trunk/arch/arm/mach-s3c64xx/mach-smdk6410.c b/trunk/arch/arm/mach-s3c64xx/mach-smdk6410.c
index a80a3163dd30..e85192a86fbe 100644
--- a/trunk/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/trunk/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -28,7 +28,6 @@
#include
#include
#include
-#include
#ifdef CONFIG_SMDK6410_WM1190_EV1
#include
@@ -352,7 +351,7 @@ static struct regulator_init_data smdk6410_vddpll = {
/* VDD_UH_MMC, LDO5 on J5 */
static struct regulator_init_data smdk6410_vdduh_mmc = {
.constraints = {
- .name = "PVDD_UH+PVDD_MMC",
+ .name = "PVDD_UH/PVDD_MMC",
.always_on = 1,
},
};
@@ -418,7 +417,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
/* S3C64xx internal logic & PLL */
static struct regulator_init_data wm8350_dcdc1_data = {
.constraints = {
- .name = "PVDD_INT+PVDD_PLL",
+ .name = "PVDD_INT/PVDD_PLL",
.min_uV = 1200000,
.max_uV = 1200000,
.always_on = 1,
@@ -453,7 +452,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
static struct regulator_init_data wm8350_dcdc4_data = {
.constraints = {
- .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
+ .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
.min_uV = 3000000,
.max_uV = 3000000,
.always_on = 1,
@@ -465,7 +464,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
/* OTGi/1190-EV1 HPVDD & AVDD */
static struct regulator_init_data wm8350_ldo4_data = {
.constraints = {
- .name = "PVDD_OTGI+HPVDD+AVDD",
+ .name = "PVDD_OTGI/HPVDD/AVDD",
.min_uV = 1200000,
.max_uV = 1200000,
.apply_uV = 1,
@@ -553,7 +552,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
static struct regulator_init_data wm1192_dcdc3 = {
.constraints = {
- .name = "PVDD_MEM+PVDD_GPS",
+ .name = "PVDD_MEM/PVDD_GPS",
.always_on = 1,
},
};
@@ -564,7 +563,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
static struct regulator_init_data wm1192_ldo1 = {
.constraints = {
- .name = "PVDD_LCD+PVDD_EXT",
+ .name = "PVDD_LCD/PVDD_EXT",
.always_on = 1,
},
.consumer_supplies = wm1192_ldo1_consumers,
diff --git a/trunk/arch/arm/mach-s3c64xx/setup-keypad.c b/trunk/arch/arm/mach-s3c64xx/setup-keypad.c
index 1d4d0ee9e870..f8ed0d22db70 100644
--- a/trunk/arch/arm/mach-s3c64xx/setup-keypad.c
+++ b/trunk/arch/arm/mach-s3c64xx/setup-keypad.c
@@ -17,7 +17,7 @@
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
- s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
+ s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
diff --git a/trunk/arch/arm/mach-s3c64xx/setup-sdhci.c b/trunk/arch/arm/mach-s3c64xx/setup-sdhci.c
index f344a222bc84..1a942037c4ef 100644
--- a/trunk/arch/arm/mach-s3c64xx/setup-sdhci.c
+++ b/trunk/arch/arm/mach-s3c64xx/setup-sdhci.c
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
else
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
- pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+ printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
writel(ctrl2, r + S3C_SDHCI_CONTROL2);
writel(ctrl3, r + S3C_SDHCI_CONTROL3);
}
diff --git a/trunk/arch/arm/mach-s5p6442/include/mach/map.h b/trunk/arch/arm/mach-s5p6442/include/mach/map.h
index 058dab4482a1..203dd5a18bd5 100644
--- a/trunk/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p6442/include/mach/map.h
*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5P6442 - Memory map definitions
@@ -16,61 +16,56 @@
#include
#include
-#define S5P6442_PA_SDRAM 0x20000000
-
-#define S5P6442_PA_I2S0 0xC0B00000
-#define S5P6442_PA_I2S1 0xF2200000
-
-#define S5P6442_PA_CHIPID 0xE0000000
+#define S5P6442_PA_CHIPID (0xE0000000)
+#define S5P_PA_CHIPID S5P6442_PA_CHIPID
-#define S5P6442_PA_SYSCON 0xE0100000
+#define S5P6442_PA_SYSCON (0xE0100000)
+#define S5P_PA_SYSCON S5P6442_PA_SYSCON
-#define S5P6442_PA_GPIO 0xE0200000
+#define S5P6442_PA_GPIO (0xE0200000)
-#define S5P6442_PA_VIC0 0xE4000000
-#define S5P6442_PA_VIC1 0xE4100000
-#define S5P6442_PA_VIC2 0xE4200000
+#define S5P6442_PA_VIC0 (0xE4000000)
+#define S5P6442_PA_VIC1 (0xE4100000)
+#define S5P6442_PA_VIC2 (0xE4200000)
-#define S5P6442_PA_SROMC 0xE7000000
+#define S5P6442_PA_SROMC (0xE7000000)
+#define S5P_PA_SROMC S5P6442_PA_SROMC
#define S5P6442_PA_MDMA 0xE8000000
#define S5P6442_PA_PDMA 0xE9000000
-#define S5P6442_PA_TIMER 0xEA000000
+#define S5P6442_PA_TIMER (0xEA000000)
+#define S5P_PA_TIMER S5P6442_PA_TIMER
-#define S5P6442_PA_SYSTIMER 0xEA100000
+#define S5P6442_PA_SYSTIMER (0xEA100000)
-#define S5P6442_PA_WATCHDOG 0xEA200000
+#define S5P6442_PA_WATCHDOG (0xEA200000)
-#define S5P6442_PA_UART 0xEC000000
+#define S5P6442_PA_UART (0xEC000000)
-#define S5P6442_PA_IIC0 0xEC100000
+#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0)
+#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400)
+#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800)
+#define S5P_SZ_UART SZ_256
+
+#define S5P6442_PA_IIC0 (0xEC100000)
+
+#define S5P6442_PA_SDRAM (0x20000000)
+#define S5P_PA_SDRAM S5P6442_PA_SDRAM
#define S5P6442_PA_SPI 0xEC300000
+/* I2S */
+#define S5P6442_PA_I2S0 0xC0B00000
+#define S5P6442_PA_I2S1 0xF2200000
+
+/* PCM */
#define S5P6442_PA_PCM0 0xF2400000
#define S5P6442_PA_PCM1 0xF2500000
-/* Compatibiltiy Defines */
-
-#define S3C_PA_IIC S5P6442_PA_IIC0
+/* compatibiltiy defines. */
#define S3C_PA_WDT S5P6442_PA_WATCHDOG
-
-#define S5P_PA_CHIPID S5P6442_PA_CHIPID
-#define S5P_PA_SDRAM S5P6442_PA_SDRAM
-#define S5P_PA_SROMC S5P6442_PA_SROMC
-#define S5P_PA_SYSCON S5P6442_PA_SYSCON
-#define S5P_PA_TIMER S5P6442_PA_TIMER
-
-/* UART */
-
#define S3C_PA_UART S5P6442_PA_UART
-
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-
-#define S5P_SZ_UART SZ_256
+#define S3C_PA_IIC S5P6442_PA_IIC0
#endif /* __ASM_ARCH_MAP_H */
diff --git a/trunk/arch/arm/mach-s5p64x0/include/mach/gpio.h b/trunk/arch/arm/mach-s5p64x0/include/mach/gpio.h
index adb5f298ead8..5486c8f01f1d 100644
--- a/trunk/arch/arm/mach-s5p64x0/include/mach/gpio.h
+++ b/trunk/arch/arm/mach-s5p64x0/include/mach/gpio.h
@@ -23,7 +23,7 @@
#define S5P6440_GPIO_A_NR (6)
#define S5P6440_GPIO_B_NR (7)
#define S5P6440_GPIO_C_NR (8)
-#define S5P6440_GPIO_F_NR (16)
+#define S5P6440_GPIO_F_NR (2)
#define S5P6440_GPIO_G_NR (7)
#define S5P6440_GPIO_H_NR (10)
#define S5P6440_GPIO_I_NR (16)
@@ -36,7 +36,7 @@
#define S5P6450_GPIO_B_NR (7)
#define S5P6450_GPIO_C_NR (8)
#define S5P6450_GPIO_D_NR (8)
-#define S5P6450_GPIO_F_NR (16)
+#define S5P6450_GPIO_F_NR (2)
#define S5P6450_GPIO_G_NR (14)
#define S5P6450_GPIO_H_NR (10)
#define S5P6450_GPIO_I_NR (16)
diff --git a/trunk/arch/arm/mach-s5p64x0/include/mach/map.h b/trunk/arch/arm/mach-s5p64x0/include/mach/map.h
index 95c91257c7ca..a9365e5ba614 100644
--- a/trunk/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5p64x0/include/mach/map.h
*
- * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P64X0 - Memory map definitions
@@ -16,46 +16,64 @@
#include
#include
-#define S5P64X0_PA_SDRAM 0x20000000
+#define S5P64X0_PA_SDRAM (0x20000000)
-#define S5P64X0_PA_CHIPID 0xE0000000
+#define S5P64X0_PA_CHIPID (0xE0000000)
+#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
+
+#define S5P64X0_PA_SYSCON (0xE0100000)
+#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
+
+#define S5P64X0_PA_GPIO (0xE0308000)
+
+#define S5P64X0_PA_VIC0 (0xE4000000)
+#define S5P64X0_PA_VIC1 (0xE4100000)
-#define S5P64X0_PA_SYSCON 0xE0100000
+#define S5P64X0_PA_SROMC (0xE7000000)
+#define S5P_PA_SROMC S5P64X0_PA_SROMC
+
+#define S5P64X0_PA_PDMA (0xE9000000)
+
+#define S5P64X0_PA_TIMER (0xEA000000)
+#define S5P_PA_TIMER S5P64X0_PA_TIMER
-#define S5P64X0_PA_GPIO 0xE0308000
+#define S5P64X0_PA_RTC (0xEA100000)
-#define S5P64X0_PA_VIC0 0xE4000000
-#define S5P64X0_PA_VIC1 0xE4100000
+#define S5P64X0_PA_WDT (0xEA200000)
-#define S5P64X0_PA_SROMC 0xE7000000
+#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
+#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
-#define S5P64X0_PA_PDMA 0xE9000000
+#define S5P_PA_UART0 S5P6450_PA_UART(0)
+#define S5P_PA_UART1 S5P6450_PA_UART(1)
+#define S5P_PA_UART2 S5P6450_PA_UART(2)
+#define S5P_PA_UART3 S5P6450_PA_UART(3)
+#define S5P_PA_UART4 S5P6450_PA_UART(4)
+#define S5P_PA_UART5 S5P6450_PA_UART(5)
-#define S5P64X0_PA_TIMER 0xEA000000
-#define S5P64X0_PA_RTC 0xEA100000
-#define S5P64X0_PA_WDT 0xEA200000
+#define S5P_SZ_UART SZ_256
-#define S5P6440_PA_IIC0 0xEC104000
-#define S5P6440_PA_IIC1 0xEC20F000
-#define S5P6450_PA_IIC0 0xEC100000
-#define S5P6450_PA_IIC1 0xEC200000
+#define S5P6440_PA_IIC0 (0xEC104000)
+#define S5P6440_PA_IIC1 (0xEC20F000)
+#define S5P6450_PA_IIC0 (0xEC100000)
+#define S5P6450_PA_IIC1 (0xEC200000)
-#define S5P64X0_PA_SPI0 0xEC400000
-#define S5P64X0_PA_SPI1 0xEC500000
+#define S5P64X0_PA_SPI0 (0xEC400000)
+#define S5P64X0_PA_SPI1 (0xEC500000)
-#define S5P64X0_PA_HSOTG 0xED100000
+#define S5P64X0_PA_HSOTG (0xED100000)
#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-#define S5P64X0_PA_I2S 0xF2000000
+#define S5P64X0_PA_I2S (0xF2000000)
#define S5P6450_PA_I2S1 0xF2800000
#define S5P6450_PA_I2S2 0xF2900000
-#define S5P64X0_PA_PCM 0xF2100000
+#define S5P64X0_PA_PCM (0xF2100000)
-#define S5P64X0_PA_ADC 0xF3000000
+#define S5P64X0_PA_ADC (0xF3000000)
-/* Compatibiltiy Defines */
+/* compatibiltiy defines. */
#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
@@ -65,25 +83,6 @@
#define S3C_PA_RTC S5P64X0_PA_RTC
#define S3C_PA_WDT S5P64X0_PA_WDT
-#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
-#define S5P_PA_SROMC S5P64X0_PA_SROMC
-#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
-#define S5P_PA_TIMER S5P64X0_PA_TIMER
-
#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
-/* UART */
-
-#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
-#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
-
-#define S5P_PA_UART0 S5P6450_PA_UART(0)
-#define S5P_PA_UART1 S5P6450_PA_UART(1)
-#define S5P_PA_UART2 S5P6450_PA_UART(2)
-#define S5P_PA_UART3 S5P6450_PA_UART(3)
-#define S5P_PA_UART4 S5P6450_PA_UART(4)
-#define S5P_PA_UART5 S5P6450_PA_UART(5)
-
-#define S5P_SZ_UART SZ_256
-
#endif /* __ASM_ARCH_MAP_H */
diff --git a/trunk/arch/arm/mach-s5pc100/include/mach/map.h b/trunk/arch/arm/mach-s5pc100/include/mach/map.h
index ccbe6b767f7d..328467b346aa 100644
--- a/trunk/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,7 +1,4 @@
/* linux/arch/arm/mach-s5pc100/include/mach/map.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min
@@ -19,115 +16,145 @@
#include
#include
-#define S5PC100_PA_SDRAM 0x20000000
-
-#define S5PC100_PA_ONENAND 0xE7100000
-#define S5PC100_PA_ONENAND_BUF 0xB0000000
-
-#define S5PC100_PA_CHIPID 0xE0000000
+/*
+ * map-base.h has already defined virtual memory address
+ * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s)
+ * S3C_VA_SYS S3C_ADDR(0x00100000) system control
+ * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used)
+ * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block
+ * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog
+ * S3C_VA_UART S3C_ADDR(0x01000000) UART
+ *
+ * S5PC100 specific virtual memory address can be defined here
+ * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO
+ *
+ */
-#define S5PC100_PA_SYSCON 0xE0100000
+#define S5PC100_PA_ONENAND_BUF (0xB0000000)
+#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
-#define S5PC100_PA_OTHERS 0xE0200000
+/* Chip ID */
-#define S5PC100_PA_GPIO 0xE0300000
+#define S5PC100_PA_CHIPID (0xE0000000)
+#define S5P_PA_CHIPID S5PC100_PA_CHIPID
-#define S5PC100_PA_VIC0 0xE4000000
-#define S5PC100_PA_VIC1 0xE4100000
-#define S5PC100_PA_VIC2 0xE4200000
+#define S5PC100_PA_SYSCON (0xE0100000)
+#define S5P_PA_SYSCON S5PC100_PA_SYSCON
-#define S5PC100_PA_SROMC 0xE7000000
+#define S5PC100_PA_OTHERS (0xE0200000)
+#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
-#define S5PC100_PA_CFCON 0xE7800000
+#define S5PC100_PA_GPIO (0xE0300000)
+#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
-#define S5PC100_PA_MDMA 0xE8100000
-#define S5PC100_PA_PDMA0 0xE9000000
-#define S5PC100_PA_PDMA1 0xE9200000
+/* Interrupt */
+#define S5PC100_PA_VIC0 (0xE4000000)
+#define S5PC100_PA_VIC1 (0xE4100000)
+#define S5PC100_PA_VIC2 (0xE4200000)
+#define S5PC100_VA_VIC S3C_VA_IRQ
+#define S5PC100_VA_VIC_OFFSET 0x10000
+#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
-#define S5PC100_PA_TIMER 0xEA000000
-#define S5PC100_PA_SYSTIMER 0xEA100000
-#define S5PC100_PA_WATCHDOG 0xEA200000
-#define S5PC100_PA_RTC 0xEA300000
+#define S5PC100_PA_SROMC (0xE7000000)
+#define S5P_PA_SROMC S5PC100_PA_SROMC
-#define S5PC100_PA_UART 0xEC000000
+#define S5PC100_PA_ONENAND (0xE7100000)
-#define S5PC100_PA_IIC0 0xEC100000
-#define S5PC100_PA_IIC1 0xEC200000
+#define S5PC100_PA_CFCON (0xE7800000)
-#define S5PC100_PA_SPI0 0xEC300000
-#define S5PC100_PA_SPI1 0xEC400000
-#define S5PC100_PA_SPI2 0xEC500000
+/* DMA */
+#define S5PC100_PA_MDMA (0xE8100000)
+#define S5PC100_PA_PDMA0 (0xE9000000)
+#define S5PC100_PA_PDMA1 (0xE9200000)
-#define S5PC100_PA_USB_HSOTG 0xED200000
-#define S5PC100_PA_USB_HSPHY 0xED300000
+/* Timer */
+#define S5PC100_PA_TIMER (0xEA000000)
+#define S5P_PA_TIMER S5PC100_PA_TIMER
-#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
+#define S5PC100_PA_SYSTIMER (0xEA100000)
-#define S5PC100_PA_FB 0xEE000000
+#define S5PC100_PA_WATCHDOG (0xEA200000)
+#define S5PC100_PA_RTC (0xEA300000)
-#define S5PC100_PA_FIMC0 0xEE200000
-#define S5PC100_PA_FIMC1 0xEE300000
-#define S5PC100_PA_FIMC2 0xEE400000
+#define S5PC100_PA_UART (0xEC000000)
-#define S5PC100_PA_I2S0 0xF2000000
-#define S5PC100_PA_I2S1 0xF2100000
-#define S5PC100_PA_I2S2 0xF2200000
+#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0)
+#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400)
+#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800)
+#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
+#define S5P_SZ_UART SZ_256
-#define S5PC100_PA_AC97 0xF2300000
+#define S5PC100_PA_IIC0 (0xEC100000)
+#define S5PC100_PA_IIC1 (0xEC200000)
-#define S5PC100_PA_PCM0 0xF2400000
-#define S5PC100_PA_PCM1 0xF2500000
+/* SPI */
+#define S5PC100_PA_SPI0 0xEC300000
+#define S5PC100_PA_SPI1 0xEC400000
+#define S5PC100_PA_SPI2 0xEC500000
-#define S5PC100_PA_SPDIF 0xF2600000
+/* USB HS OTG */
+#define S5PC100_PA_USB_HSOTG (0xED200000)
+#define S5PC100_PA_USB_HSPHY (0xED300000)
-#define S5PC100_PA_TSADC 0xF3000000
+#define S5PC100_PA_FB (0xEE000000)
-#define S5PC100_PA_KEYPAD 0xF3100000
+#define S5PC100_PA_FIMC0 (0xEE200000)
+#define S5PC100_PA_FIMC1 (0xEE300000)
+#define S5PC100_PA_FIMC2 (0xEE400000)
-/* Compatibiltiy Defines */
+#define S5PC100_PA_I2S0 (0xF2000000)
+#define S5PC100_PA_I2S1 (0xF2100000)
+#define S5PC100_PA_I2S2 (0xF2200000)
-#define S3C_PA_FB S5PC100_PA_FB
-#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
-#define S3C_PA_IIC S5PC100_PA_IIC0
-#define S3C_PA_IIC1 S5PC100_PA_IIC1
-#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
-#define S3C_PA_ONENAND S5PC100_PA_ONENAND
-#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
-#define S3C_PA_RTC S5PC100_PA_RTC
-#define S3C_PA_TSADC S5PC100_PA_TSADC
-#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
-#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
-#define S3C_PA_WDT S5PC100_PA_WATCHDOG
+#define S5PC100_PA_AC97 0xF2300000
-#define S5P_PA_CHIPID S5PC100_PA_CHIPID
-#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
-#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
-#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
-#define S5P_PA_SDRAM S5PC100_PA_SDRAM
-#define S5P_PA_SROMC S5PC100_PA_SROMC
-#define S5P_PA_SYSCON S5PC100_PA_SYSCON
-#define S5P_PA_TIMER S5PC100_PA_TIMER
+/* PCM */
+#define S5PC100_PA_PCM0 0xF2400000
+#define S5PC100_PA_PCM1 0xF2500000
-#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
-#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
-#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
+#define S5PC100_PA_SPDIF 0xF2600000
-#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
+#define S5PC100_PA_TSADC (0xF3000000)
-#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
+/* KEYPAD */
+#define S5PC100_PA_KEYPAD (0xF3100000)
-/* UART */
+#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-#define S3C_PA_UART S5PC100_PA_UART
+#define S5PC100_PA_SDRAM (0x20000000)
+#define S5P_PA_SDRAM S5PC100_PA_SDRAM
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-#define S5P_PA_UART3 S5P_PA_UART(3)
+/* compatibiltiy defines. */
+#define S3C_PA_UART S5PC100_PA_UART
+#define S3C_PA_IIC S5PC100_PA_IIC0
+#define S3C_PA_IIC1 S5PC100_PA_IIC1
+#define S3C_PA_FB S5PC100_PA_FB
+#define S3C_PA_G2D S5PC100_PA_G2D
+#define S3C_PA_G3D S5PC100_PA_G3D
+#define S3C_PA_JPEG S5PC100_PA_JPEG
+#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
+#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
+#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
+#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
+#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
+#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
+#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
+#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
+#define S3C_PA_WDT S5PC100_PA_WATCHDOG
+#define S3C_PA_TSADC S5PC100_PA_TSADC
+#define S3C_PA_ONENAND S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
+#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
+#define S3C_PA_RTC S5PC100_PA_RTC
+
+#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
+#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
+#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
-#define S5P_SZ_UART SZ_256
+#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
+#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
+#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
-#endif /* __ASM_ARCH_MAP_H */
+#endif /* __ASM_ARCH_C100_MAP_H */
diff --git a/trunk/arch/arm/mach-s5pv210/include/mach/map.h b/trunk/arch/arm/mach-s5pv210/include/mach/map.h
index 1dd58836fd4f..3611492ad681 100644
--- a/trunk/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5pv210/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5pv210/include/mach/map.h
*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - Memory map definitions
@@ -16,120 +16,122 @@
#include
#include
-#define S5PV210_PA_SDRAM 0x20000000
+#define S5PV210_PA_SROM_BANK5 (0xA8000000)
-#define S5PV210_PA_SROM_BANK5 0xA8000000
+#define S5PC110_PA_ONENAND (0xB0000000)
+#define S5P_PA_ONENAND S5PC110_PA_ONENAND
-#define S5PC110_PA_ONENAND 0xB0000000
-#define S5PC110_PA_ONENAND_DMA 0xB0600000
+#define S5PC110_PA_ONENAND_DMA (0xB0600000)
+#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
-#define S5PV210_PA_CHIPID 0xE0000000
+#define S5PV210_PA_CHIPID (0xE0000000)
+#define S5P_PA_CHIPID S5PV210_PA_CHIPID
-#define S5PV210_PA_SYSCON 0xE0100000
+#define S5PV210_PA_SYSCON (0xE0100000)
+#define S5P_PA_SYSCON S5PV210_PA_SYSCON
-#define S5PV210_PA_GPIO 0xE0200000
+#define S5PV210_PA_GPIO (0xE0200000)
-#define S5PV210_PA_SPDIF 0xE1100000
+/* SPI */
+#define S5PV210_PA_SPI0 0xE1300000
+#define S5PV210_PA_SPI1 0xE1400000
-#define S5PV210_PA_SPI0 0xE1300000
-#define S5PV210_PA_SPI1 0xE1400000
+#define S5PV210_PA_KEYPAD (0xE1600000)
-#define S5PV210_PA_KEYPAD 0xE1600000
+#define S5PV210_PA_IIC0 (0xE1800000)
+#define S5PV210_PA_IIC1 (0xFAB00000)
+#define S5PV210_PA_IIC2 (0xE1A00000)
-#define S5PV210_PA_ADC 0xE1700000
+#define S5PV210_PA_TIMER (0xE2500000)
+#define S5P_PA_TIMER S5PV210_PA_TIMER
-#define S5PV210_PA_IIC0 0xE1800000
-#define S5PV210_PA_IIC1 0xFAB00000
-#define S5PV210_PA_IIC2 0xE1A00000
+#define S5PV210_PA_SYSTIMER (0xE2600000)
-#define S5PV210_PA_AC97 0xE2200000
+#define S5PV210_PA_WATCHDOG (0xE2700000)
-#define S5PV210_PA_PCM0 0xE2300000
-#define S5PV210_PA_PCM1 0xE1200000
-#define S5PV210_PA_PCM2 0xE2B00000
+#define S5PV210_PA_RTC (0xE2800000)
+#define S5PV210_PA_UART (0xE2900000)
-#define S5PV210_PA_TIMER 0xE2500000
-#define S5PV210_PA_SYSTIMER 0xE2600000
-#define S5PV210_PA_WATCHDOG 0xE2700000
-#define S5PV210_PA_RTC 0xE2800000
+#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0)
+#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400)
+#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800)
+#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00)
-#define S5PV210_PA_UART 0xE2900000
+#define S5P_SZ_UART SZ_256
-#define S5PV210_PA_SROMC 0xE8000000
+#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
-#define S5PV210_PA_CFCON 0xE8200000
+#define S5PV210_PA_SROMC (0xE8000000)
+#define S5P_PA_SROMC S5PV210_PA_SROMC
-#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
+#define S5PV210_PA_CFCON (0xE8200000)
-#define S5PV210_PA_HSOTG 0xEC000000
-#define S5PV210_PA_HSPHY 0xEC100000
+#define S5PV210_PA_MDMA 0xFA200000
+#define S5PV210_PA_PDMA0 0xE0900000
+#define S5PV210_PA_PDMA1 0xE0A00000
-#define S5PV210_PA_IIS0 0xEEE30000
-#define S5PV210_PA_IIS1 0xE2100000
-#define S5PV210_PA_IIS2 0xE2A00000
+#define S5PV210_PA_FB (0xF8000000)
-#define S5PV210_PA_DMC0 0xF0000000
-#define S5PV210_PA_DMC1 0xF1400000
+#define S5PV210_PA_FIMC0 (0xFB200000)
+#define S5PV210_PA_FIMC1 (0xFB300000)
+#define S5PV210_PA_FIMC2 (0xFB400000)
-#define S5PV210_PA_VIC0 0xF2000000
-#define S5PV210_PA_VIC1 0xF2100000
-#define S5PV210_PA_VIC2 0xF2200000
-#define S5PV210_PA_VIC3 0xF2300000
+#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
-#define S5PV210_PA_FB 0xF8000000
+#define S5PV210_PA_HSOTG (0xEC000000)
+#define S5PV210_PA_HSPHY (0xEC100000)
-#define S5PV210_PA_MDMA 0xFA200000
-#define S5PV210_PA_PDMA0 0xE0900000
-#define S5PV210_PA_PDMA1 0xE0A00000
+#define S5PV210_PA_VIC0 (0xF2000000)
+#define S5PV210_PA_VIC1 (0xF2100000)
+#define S5PV210_PA_VIC2 (0xF2200000)
+#define S5PV210_PA_VIC3 (0xF2300000)
-#define S5PV210_PA_MIPI_CSIS 0xFA600000
+#define S5PV210_PA_SDRAM (0x20000000)
+#define S5P_PA_SDRAM S5PV210_PA_SDRAM
-#define S5PV210_PA_FIMC0 0xFB200000
-#define S5PV210_PA_FIMC1 0xFB300000
-#define S5PV210_PA_FIMC2 0xFB400000
+/* S/PDIF */
+#define S5PV210_PA_SPDIF 0xE1100000
-/* Compatibiltiy Defines */
+/* I2S */
+#define S5PV210_PA_IIS0 0xEEE30000
+#define S5PV210_PA_IIS1 0xE2100000
+#define S5PV210_PA_IIS2 0xE2A00000
-#define S3C_PA_FB S5PV210_PA_FB
-#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
-#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
-#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
-#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
-#define S3C_PA_IIC S5PV210_PA_IIC0
-#define S3C_PA_IIC1 S5PV210_PA_IIC1
-#define S3C_PA_IIC2 S5PV210_PA_IIC2
-#define S3C_PA_RTC S5PV210_PA_RTC
-#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
-#define S3C_PA_WDT S5PV210_PA_WATCHDOG
+/* PCM */
+#define S5PV210_PA_PCM0 0xE2300000
+#define S5PV210_PA_PCM1 0xE1200000
+#define S5PV210_PA_PCM2 0xE2B00000
-#define S5P_PA_CHIPID S5PV210_PA_CHIPID
-#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
-#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
-#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
-#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
-#define S5P_PA_ONENAND S5PC110_PA_ONENAND
-#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
-#define S5P_PA_SDRAM S5PV210_PA_SDRAM
-#define S5P_PA_SROMC S5PV210_PA_SROMC
-#define S5P_PA_SYSCON S5PV210_PA_SYSCON
-#define S5P_PA_TIMER S5PV210_PA_TIMER
+/* AC97 */
+#define S5PV210_PA_AC97 0xE2200000
-#define SAMSUNG_PA_ADC S5PV210_PA_ADC
-#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
-#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
+#define S5PV210_PA_ADC (0xE1700000)
-/* UART */
+#define S5PV210_PA_DMC0 (0xF0000000)
+#define S5PV210_PA_DMC1 (0xF1400000)
-#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+#define S5PV210_PA_MIPI_CSIS 0xFA600000
-#define S3C_PA_UART S5PV210_PA_UART
+/* compatibiltiy defines. */
+#define S3C_PA_UART S5PV210_PA_UART
+#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
+#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
+#define S3C_PA_IIC S5PV210_PA_IIC0
+#define S3C_PA_IIC1 S5PV210_PA_IIC1
+#define S3C_PA_IIC2 S5PV210_PA_IIC2
+#define S3C_PA_FB S5PV210_PA_FB
+#define S3C_PA_RTC S5PV210_PA_RTC
+#define S3C_PA_WDT S5PV210_PA_WATCHDOG
+#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
+#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
+#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
+#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
+#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-#define S5P_PA_UART3 S5P_PA_UART(3)
-
-#define S5P_SZ_UART SZ_256
+#define SAMSUNG_PA_ADC S5PV210_PA_ADC
+#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
+#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
#endif /* __ASM_ARCH_MAP_H */
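
The map.h hunk above reshuffles the S5PV210 physical-address macros; the detail worth keeping in view is how the per-port UART windows are derived from one controller base (0xE2900000) plus a fixed 0x400 stride, with a SZ_256 register window each. Below is a minimal standalone sketch of that arithmetic only; the addresses are taken from the hunk, everything else is invented for illustration and is not kernel code.

/*
 * Illustrative sketch: deriving per-port register windows from one
 * controller base plus a fixed per-port stride, mirroring the
 * S5P_PA_UARTx spacing and SZ_256 window shown in the hunk above.
 */
#include <stdio.h>

#define UART_BASE   0xE2900000UL   /* S5PV210_PA_UART from the hunk */
#define UART_STRIDE 0x400UL        /* spacing between UART0..UART3 */
#define SZ_256      0x100UL        /* register window size per port */

int main(void)
{
	for (unsigned int port = 0; port < 4; port++) {
		unsigned long start = UART_BASE + port * UART_STRIDE;
		unsigned long end   = start + SZ_256 - 1;  /* inclusive end */

		printf("UART%u: 0x%08lX - 0x%08lX\n", port, start, end);
	}
	return 0;
}
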
diff --git a/trunk/arch/arm/mach-s5pv210/mach-aquila.c b/trunk/arch/arm/mach-s5pv210/mach-aquila.c
index 557add4fc56c..461aa035afc0 100644
--- a/trunk/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/trunk/arch/arm/mach-s5pv210/mach-aquila.c
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
static struct regulator_init_data aquila_ldo3_data = {
.constraints = {
- .name = "VUSB+MIPI_1.1V",
+ .name = "VUSB/MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
static struct regulator_init_data aquila_ldo8_data = {
.constraints = {
- .name = "VUSB+VADC_3.3V",
+ .name = "VUSB/VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
static struct regulator_init_data aquila_ldo9_data = {
.constraints = {
- .name = "VCC+VCAM_2.8V",
+ .name = "VCC/VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
@@ -381,12 +381,9 @@ static struct max8998_platform_data aquila_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
- .buck1_voltage1 = 1200000,
- .buck1_voltage2 = 1200000,
- .buck1_voltage3 = 1200000,
- .buck1_voltage4 = 1200000,
- .buck2_voltage1 = 1200000,
- .buck2_voltage2 = 1200000,
+ .buck1_max_voltage1 = 1200000,
+ .buck1_max_voltage2 = 1200000,
+ .buck2_max_voltage = 1200000,
};
#endif
diff --git a/trunk/arch/arm/mach-s5pv210/mach-goni.c b/trunk/arch/arm/mach-s5pv210/mach-goni.c
index 056f5c769b0a..e22d5112fd44 100644
--- a/trunk/arch/arm/mach-s5pv210/mach-goni.c
+++ b/trunk/arch/arm/mach-s5pv210/mach-goni.c
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = {
static struct regulator_init_data goni_ldo3_data = {
.constraints = {
- .name = "VUSB+MIPI_1.1V",
+ .name = "VUSB/MIPI_1.1V",
.min_uV = 1100000,
.max_uV = 1100000,
.apply_uV = 1,
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = {
static struct regulator_init_data goni_ldo8_data = {
.constraints = {
- .name = "VUSB+VADC_3.3V",
+ .name = "VUSB/VADC_3.3V",
.min_uV = 3300000,
.max_uV = 3300000,
.apply_uV = 1,
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = {
static struct regulator_init_data goni_ldo9_data = {
.constraints = {
- .name = "VCC+VCAM_2.8V",
+ .name = "VCC/VCAM_2.8V",
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = 1,
@@ -521,12 +521,9 @@ static struct max8998_platform_data goni_max8998_pdata = {
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
- .buck1_voltage1 = 1200000,
- .buck1_voltage2 = 1200000,
- .buck1_voltage3 = 1200000,
- .buck1_voltage4 = 1200000,
- .buck2_voltage1 = 1200000,
- .buck2_voltage2 = 1200000,
+ .buck1_max_voltage1 = 1200000,
+ .buck1_max_voltage2 = 1200000,
+ .buck2_max_voltage = 1200000,
};
#endif
diff --git a/trunk/arch/arm/mach-s5pv310/Kconfig b/trunk/arch/arm/mach-s5pv310/Kconfig
index b2a9acc5185f..09c4c21b70cc 100644
--- a/trunk/arch/arm/mach-s5pv310/Kconfig
+++ b/trunk/arch/arm/mach-s5pv310/Kconfig
@@ -122,7 +122,6 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select S5PV310_DEV_PD
- select S5PV310_DEV_SYSMMU
select S5PV310_SETUP_I2C1
select S5PV310_SETUP_SDHCI
help
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/map.h b/trunk/arch/arm/mach-s5pv310/include/mach/map.h
index 901657fa7a12..74d400625a23 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/map.h
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s5pv310/include/mach/map.h
*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV310 - Memory map definitions
@@ -23,43 +23,90 @@
#include
-#define S5PV310_PA_SYSRAM 0x02025000
+#define S5PV310_PA_SYSRAM (0x02025000)
-#define S5PV310_PA_I2S0 0x03830000
-#define S5PV310_PA_I2S1 0xE3100000
-#define S5PV310_PA_I2S2 0xE2A00000
+#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
-#define S5PV310_PA_PCM0 0x03840000
-#define S5PV310_PA_PCM1 0x13980000
-#define S5PV310_PA_PCM2 0x13990000
+#define S5PC210_PA_ONENAND (0x0C000000)
+#define S5P_PA_ONENAND S5PC210_PA_ONENAND
-#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
+#define S5PC210_PA_ONENAND_DMA (0x0C600000)
+#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
+
+#define S5PV310_PA_CHIPID (0x10000000)
+#define S5P_PA_CHIPID S5PV310_PA_CHIPID
+
+#define S5PV310_PA_SYSCON (0x10010000)
+#define S5P_PA_SYSCON S5PV310_PA_SYSCON
+
+#define S5PV310_PA_PMU (0x10020000)
-#define S5PC210_PA_ONENAND 0x0C000000
-#define S5PC210_PA_ONENAND_DMA 0x0C600000
+#define S5PV310_PA_CMU (0x10030000)
-#define S5PV310_PA_CHIPID 0x10000000
+#define S5PV310_PA_WATCHDOG (0x10060000)
+#define S5PV310_PA_RTC (0x10070000)
-#define S5PV310_PA_SYSCON 0x10010000
-#define S5PV310_PA_PMU 0x10020000
-#define S5PV310_PA_CMU 0x10030000
+#define S5PV310_PA_DMC0 (0x10400000)
-#define S5PV310_PA_WATCHDOG 0x10060000
-#define S5PV310_PA_RTC 0x10070000
+#define S5PV310_PA_COMBINER (0x10448000)
-#define S5PV310_PA_DMC0 0x10400000
+#define S5PV310_PA_COREPERI (0x10500000)
+#define S5PV310_PA_GIC_CPU (0x10500100)
+#define S5PV310_PA_TWD (0x10500600)
+#define S5PV310_PA_GIC_DIST (0x10501000)
+#define S5PV310_PA_L2CC (0x10502000)
+
+/* DMA */
+#define S5PV310_PA_MDMA 0x10810000
+#define S5PV310_PA_PDMA0 0x12680000
+#define S5PV310_PA_PDMA1 0x12690000
+
+#define S5PV310_PA_GPIO1 (0x11400000)
+#define S5PV310_PA_GPIO2 (0x11000000)
+#define S5PV310_PA_GPIO3 (0x03860000)
+
+#define S5PV310_PA_MIPI_CSIS0 0x11880000
+#define S5PV310_PA_MIPI_CSIS1 0x11890000
+
+#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
+
+#define S5PV310_PA_SROMC (0x12570000)
+#define S5P_PA_SROMC S5PV310_PA_SROMC
-#define S5PV310_PA_COMBINER 0x10448000
+/* S/PDIF */
+#define S5PV310_PA_SPDIF 0xE1100000
-#define S5PV310_PA_COREPERI 0x10500000
-#define S5PV310_PA_GIC_CPU 0x10500100
-#define S5PV310_PA_TWD 0x10500600
-#define S5PV310_PA_GIC_DIST 0x10501000
-#define S5PV310_PA_L2CC 0x10502000
+/* I2S */
+#define S5PV310_PA_I2S0 0x03830000
+#define S5PV310_PA_I2S1 0xE3100000
+#define S5PV310_PA_I2S2 0xE2A00000
-#define S5PV310_PA_MDMA 0x10810000
-#define S5PV310_PA_PDMA0 0x12680000
-#define S5PV310_PA_PDMA1 0x12690000
+/* PCM */
+#define S5PV310_PA_PCM0 0x03840000
+#define S5PV310_PA_PCM1 0x13980000
+#define S5PV310_PA_PCM2 0x13990000
+
+/* AC97 */
+#define S5PV310_PA_AC97 0x139A0000
+
+#define S5PV310_PA_UART (0x13800000)
+
+#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0 S5P_PA_UART(0)
+#define S5P_PA_UART1 S5P_PA_UART(1)
+#define S5P_PA_UART2 S5P_PA_UART(2)
+#define S5P_PA_UART3 S5P_PA_UART(3)
+#define S5P_PA_UART4 S5P_PA_UART(4)
+
+#define S5P_SZ_UART SZ_256
+
+#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
+
+#define S5PV310_PA_TIMER (0x139D0000)
+#define S5P_PA_TIMER S5PV310_PA_TIMER
+
+#define S5PV310_PA_SDRAM (0x40000000)
+#define S5P_PA_SDRAM S5PV310_PA_SDRAM
#define S5PV310_PA_SYSMMU_MDMA 0x10A40000
#define S5PV310_PA_SYSMMU_SSS 0x10A50000
@@ -77,32 +124,11 @@
#define S5PV310_PA_SYSMMU_TV 0x12E20000
#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
+#define S5PV310_SYSMMU_TOTAL_IPNUM 16
+#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM
-#define S5PV310_PA_GPIO1 0x11400000
-#define S5PV310_PA_GPIO2 0x11000000
-#define S5PV310_PA_GPIO3 0x03860000
-
-#define S5PV310_PA_MIPI_CSIS0 0x11880000
-#define S5PV310_PA_MIPI_CSIS1 0x11890000
-
-#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
-
-#define S5PV310_PA_SROMC 0x12570000
-
-#define S5PV310_PA_UART 0x13800000
-
-#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
-
-#define S5PV310_PA_AC97 0x139A0000
-
-#define S5PV310_PA_TIMER 0x139D0000
-
-#define S5PV310_PA_SDRAM 0x40000000
-
-#define S5PV310_PA_SPDIF 0xE1100000
-
-/* Compatibiltiy Defines */
-
+/* compatibiltiy defines. */
+#define S3C_PA_UART S5PV310_PA_UART
#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
@@ -117,28 +143,7 @@
#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
#define S3C_PA_RTC S5PV310_PA_RTC
#define S3C_PA_WDT S5PV310_PA_WATCHDOG
-
-#define S5P_PA_CHIPID S5PV310_PA_CHIPID
#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0
#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1
-#define S5P_PA_ONENAND S5PC210_PA_ONENAND
-#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
-#define S5P_PA_SDRAM S5PV310_PA_SDRAM
-#define S5P_PA_SROMC S5PV310_PA_SROMC
-#define S5P_PA_SYSCON S5PV310_PA_SYSCON
-#define S5P_PA_TIMER S5PV310_PA_TIMER
-
-/* UART */
-
-#define S3C_PA_UART S5PV310_PA_UART
-
-#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
-#define S5P_PA_UART0 S5P_PA_UART(0)
-#define S5P_PA_UART1 S5P_PA_UART(1)
-#define S5P_PA_UART2 S5P_PA_UART(2)
-#define S5P_PA_UART3 S5P_PA_UART(3)
-#define S5P_PA_UART4 S5P_PA_UART(4)
-
-#define S5P_SZ_UART SZ_256
#endif /* __ASM_ARCH_MAP_H */
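
This hunk moves S5PV310_SYSMMU_TOTAL_IPNUM / S5P_SYSMMU_TOTAL_IPNUM into map.h, and the sysmmu.h hunk that follows sizes sysmmu_ips_name[] with it, so the enum of IP blocks and the name table stay the same length. The standalone sketch below shows that general pattern with invented names and an added compile-time size check; it makes no claim about the actual Samsung headers.

/*
 * Illustrative sketch: keeping an ID enum and its name table in sync
 * via a shared count constant, in the spirit of sysmmu_ips_name[]
 * being sized by *_TOTAL_IPNUM. Names are invented for the example.
 */
#include <stdio.h>

enum ip_id { IP_MDMA, IP_SSS, IP_FIMC0, IP_COUNT };

static const char *const ip_name[IP_COUNT] = {
	[IP_MDMA]  = "SYSMMU_MDMA",
	[IP_SSS]   = "SYSMMU_SSS",
	[IP_FIMC0] = "SYSMMU_FIMC0",
};

/* Fails to compile if the table and the enum ever disagree in size. */
_Static_assert(sizeof(ip_name) / sizeof(ip_name[0]) == IP_COUNT,
	       "ip_name[] out of sync with enum ip_id");

int main(void)
{
	for (int i = 0; i < IP_COUNT; i++)
		printf("%d: %s\n", i, ip_name[i]);
	return 0;
}
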
diff --git a/trunk/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/trunk/arch/arm/mach-s5pv310/include/mach/sysmmu.h
index 598fc5c9211b..662fe85ff4d5 100644
--- a/trunk/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+++ b/trunk/arch/arm/mach-s5pv310/include/mach/sysmmu.h
@@ -13,9 +13,6 @@
#ifndef __ASM_ARM_ARCH_SYSMMU_H
#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
-#define S5PV310_SYSMMU_TOTAL_IPNUM 16
-#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM
-
enum s5pv310_sysmmu_ips {
SYSMMU_MDMA,
SYSMMU_SSS,
@@ -35,7 +32,7 @@ enum s5pv310_sysmmu_ips {
SYSMMU_MFC_R,
};
-static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
+static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = {
"SYSMMU_MDMA" ,
"SYSMMU_SSS" ,
"SYSMMU_FIMC0" ,
diff --git a/trunk/arch/arm/mach-sa1100/collie.c b/trunk/arch/arm/mach-sa1100/collie.c
index bd3e1bfdd6aa..d43c5ef58eb6 100644
--- a/trunk/arch/arm/mach-sa1100/collie.c
+++ b/trunk/arch/arm/mach-sa1100/collie.c
@@ -241,9 +241,6 @@ static struct locomo_platform_data locomo_info = {
struct platform_device collie_locomo_device = {
.name = "locomo",
.id = 0,
- .dev = {
- .platform_data = &locomo_info,
- },
.num_resources = ARRAY_SIZE(locomo_resources),
.resource = locomo_resources,
};
diff --git a/trunk/arch/arm/mach-shmobile/board-ag5evm.c b/trunk/arch/arm/mach-shmobile/board-ag5evm.c
index 4303a86e6e38..2123b96b5638 100644
--- a/trunk/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/trunk/arch/arm/mach-shmobile/board-ag5evm.c
@@ -454,7 +454,6 @@ static void __init ag5evm_init(void)
gpio_direction_output(GPIO_PORT217, 0);
mdelay(1);
gpio_set_value(GPIO_PORT217, 1);
- mdelay(100);
/* LCD backlight controller */
gpio_request(GPIO_PORT235, NULL); /* RESET */
diff --git a/trunk/arch/arm/mach-shmobile/board-ap4evb.c b/trunk/arch/arm/mach-shmobile/board-ap4evb.c
index 81d6536552a9..3cf0951caa2d 100644
--- a/trunk/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/trunk/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
lcdc_info.clock_source = LCDC_CLK_BUS;
lcdc_info.ch[0].interface_type = RGB18;
- lcdc_info.ch[0].clock_divider = 3;
+ lcdc_info.ch[0].clock_divider = 2;
lcdc_info.ch[0].flags = 0;
lcdc_info.ch[0].lcd_size_cfg.width = 152;
lcdc_info.ch[0].lcd_size_cfg.height = 91;
diff --git a/trunk/arch/arm/mach-shmobile/board-mackerel.c b/trunk/arch/arm/mach-shmobile/board-mackerel.c
index 1657eac5dde2..fb4213a4e15a 100644
--- a/trunk/arch/arm/mach-shmobile/board-mackerel.c
+++ b/trunk/arch/arm/mach-shmobile/board-mackerel.c
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.lcd_cfg = mackerel_lcdc_modes,
.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
.interface_type = RGB24,
- .clock_divider = 3,
+ .clock_divider = 2,
.flags = 0,
.lcd_size_cfg.width = 152,
.lcd_size_cfg.height = 91,
diff --git a/trunk/arch/arm/mach-shmobile/clock-sh73a0.c b/trunk/arch/arm/mach-shmobile/clock-sh73a0.c
index 7e58904c1c8c..ddd4a1b775f0 100644
--- a/trunk/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/trunk/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
};
enum { MSTP001,
- MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
+ MSTP125, MSTP118, MSTP116, MSTP100,
MSTP219,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,10 +275,6 @@ enum { MSTP001,
static struct clk mstp_clks[MSTP_NR] = {
[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
- [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
- [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
- [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
- [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -310,9 +306,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("r_clk", &r_clk),
/* DIV6 clocks */
- CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
- CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
- CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -320,15 +313,11 @@ static struct clk_lookup lookups[] = {
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
- CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
- CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
- CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
- CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
- CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
- CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
+ CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
diff --git a/trunk/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/trunk/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index 3029aba38688..efd3687ba190 100644
--- a/trunk/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/trunk/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -6,10 +6,13 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500
+DD 0x01001000, 0x01001000
+
LIST "GPIO Setting"
EB 0xE6051013, 0xA2
LIST "CPG"
+ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C
@@ -34,9 +37,6 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C
-LIST "SUB/USBClk"
-ED 0xE6150080, 0x00000180
-
LIST "BSC"
ED 0xFEC10000, 0x00E0001B
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087
-WAIT 30, 0xFE40009C
+WAIT 10, 0xFE40009C
ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C
-ED 0xFE400354, 0x01AD8002
+ED 0xE6150354, 0x00000002
LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11
diff --git a/trunk/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/trunk/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index 3029aba38688..efd3687ba190 100644
--- a/trunk/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/trunk/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -6,10 +6,13 @@ LIST "RWT Setting"
EW 0xE6020004, 0xA500
EW 0xE6030004, 0xA500
+DD 0x01001000, 0x01001000
+
LIST "GPIO Setting"
EB 0xE6051013, 0xA2
LIST "CPG"
+ED 0xE6150080, 0x00000180
ED 0xE61500C0, 0x00000002
WAIT 1, 0xFE40009C
@@ -34,9 +37,6 @@ ED 0xE615002C, 0x93000040
WAIT 1, 0xFE40009C
-LIST "SUB/USBClk"
-ED 0xE6150080, 0x00000180
-
LIST "BSC"
ED 0xFEC10000, 0x00E0001B
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
ED 0xFE40004C, 0x00110209
ED 0xFE400010, 0x00000087
-WAIT 30, 0xFE40009C
+WAIT 10, 0xFE40009C
ED 0xFE400084, 0x0000003F
EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
WAIT 1, 0xFE40009C
-ED 0xFE400354, 0x01AD8002
+ED 0xE6150354, 0x00000002
LIST "SCIF0 - Serial port for earlyprintk"
EB 0xE6053098, 0x11
diff --git a/trunk/arch/arm/mach-spear3xx/include/mach/spear320.h b/trunk/arch/arm/mach-spear3xx/include/mach/spear320.h
index 53677e464d4b..cacf17a958cd 100644
--- a/trunk/arch/arm/mach-spear3xx/include/mach/spear320.h
+++ b/trunk/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -62,7 +62,7 @@
#define SPEAR320_SMII1_BASE 0xAB000000
#define SPEAR320_SMII1_SIZE 0x01000000
-#define SPEAR320_SOC_CONFIG_BASE 0xB3000000
+#define SPEAR320_SOC_CONFIG_BASE 0xB4000000
#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
/* Interrupt registers offsets and masks */
#define INT_STS_MASK_REG 0x04
diff --git a/trunk/arch/arm/mach-tegra/include/mach/kbc.h b/trunk/arch/arm/mach-tegra/include/mach/kbc.h
index 04c779832c78..66ad2760c621 100644
--- a/trunk/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/trunk/arch/arm/mach-tegra/include/mach/kbc.h
@@ -57,6 +57,5 @@ struct tegra_kbc_platform_data {
const struct matrix_keymap_data *keymap_data;
bool wakeup;
- bool use_fn_map;
};
#endif
diff --git a/trunk/arch/arm/mm/Kconfig b/trunk/arch/arm/mm/Kconfig
index e4509bae8fc4..9d30c6f804b9 100644
--- a/trunk/arch/arm/mm/Kconfig
+++ b/trunk/arch/arm/mm/Kconfig
@@ -405,7 +405,7 @@ config CPU_V6
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6 || CPU_V7
- default y if SMP
+ default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@ config CPU_32v6K
# ARMv7
config CPU_V7
bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
- select CPU_32v6K
+ select CPU_32v6K if !ARCH_OMAP2
select CPU_32v7
select CPU_ABRT_EV7
select CPU_PABRT_V7
@@ -644,7 +644,7 @@ config ARM_THUMBEE
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
+ depends on CPU_V7 && !CPU_V6
select HAVE_PROC_CPU if PROC_FS
default y if SMP
help
diff --git a/trunk/arch/arm/mm/cache-l2x0.c b/trunk/arch/arm/mm/cache-l2x0.c
index f2ce38e085d2..170c9bb95866 100644
--- a/trunk/arch/arm/mm/cache-l2x0.c
+++ b/trunk/arch/arm/mm/cache-l2x0.c
@@ -49,13 +49,7 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
-
-#ifdef CONFIG_ARM_ERRATA_753970
- /* write to an unmmapped register */
- writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S
index 8e3356239136..0c1172b56b4e 100644
--- a/trunk/arch/arm/mm/proc-v7.S
+++ b/trunk/arch/arm/mm/proc-v7.S
@@ -264,12 +264,6 @@ __v7_setup:
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
-#ifdef CONFIG_ARM_ERRATA_751472
- cmp r6, #0x30 @ present prior to r3p0
- mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orrlt r10, r10, #1 << 11 @ set bit #11
- mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
3: mov r10, #0
#ifdef HARVARD_CACHE
diff --git a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c
index c074e66ad224..8aa974491dfc 100644
--- a/trunk/arch/arm/oprofile/common.c
+++ b/trunk/arch/arm/oprofile/common.c
@@ -10,6 +10,8 @@
*/
#include
+#include
+#include
#include
#include
#include
@@ -44,7 +46,6 @@ char *op_name_from_perf_id(void)
return NULL;
}
}
-#endif
static int report_trace(struct stackframe *frame, void *d)
{
@@ -84,7 +85,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail)
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
- if (tail + 1 >= buftail[0].fp)
+ if (tail >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;
@@ -110,7 +111,6 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- /* provide backtrace support also in timer mode: */
ops->backtrace = arm_backtrace;
return oprofile_perf_init(ops);
@@ -120,3 +120,11 @@ void __exit oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ pr_info("oprofile: hardware counters not available\n");
+ return -ENODEV;
+}
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/trunk/arch/arm/plat-omap/mailbox.c b/trunk/arch/arm/plat-omap/mailbox.c
index 49d3208793e5..459b319a9fad 100644
--- a/trunk/arch/arm/plat-omap/mailbox.c
+++ b/trunk/arch/arm/plat-omap/mailbox.c
@@ -322,18 +322,15 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
- struct omap_mbox *_mbox, *mbox = NULL;
- int i, ret;
+ struct omap_mbox *mbox;
+ int ret;
if (!mboxes)
return ERR_PTR(-EINVAL);
- for (i = 0; (_mbox = mboxes[i]); i++) {
- if (!strcmp(_mbox->name, name)) {
- mbox = _mbox;
+ for (mbox = *mboxes; mbox; mbox++)
+ if (!strcmp(mbox->name, name))
break;
- }
- }
if (!mbox)
return ERR_PTR(-ENOENT);
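
Both sides of the omap_mbox_get() hunk above implement the same idea: walk a table of registered mailboxes until a name matches, and return -ENOENT when nothing does. A hedged userspace sketch of that lookup shape follows, using a NULL-terminated array of pointers; the types and names are invented for the example and make no claim about the driver's actual mboxes layout.

/*
 * Illustrative sketch: lookup-by-name over a NULL-terminated array of
 * pointers, the general shape of the omap_mbox_get() search above.
 */
#include <stdio.h>
#include <string.h>

struct box { const char *name; };

static struct box box_a = { "mbox-a" }, box_b = { "mbox-b" };
static struct box *boxes[] = { &box_a, &box_b, NULL };

static struct box *box_get(const char *name)
{
	for (int i = 0; boxes[i]; i++)
		if (!strcmp(boxes[i]->name, name))
			return boxes[i];
	return NULL;            /* caller maps this to -ENOENT */
}

int main(void)
{
	struct box *b = box_get("mbox-b");

	printf("%s\n", b ? b->name : "not found");
	return 0;
}
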
diff --git a/trunk/arch/arm/plat-pxa/mfp.c b/trunk/arch/arm/plat-pxa/mfp.c
index a9aa5ad3f4eb..b77e018d36c1 100644
--- a/trunk/arch/arm/plat-pxa/mfp.c
+++ b/trunk/arch/arm/plat-pxa/mfp.c
@@ -139,11 +139,10 @@ static const unsigned long mfpr_edge[] = {
#define mfp_configured(p) ((p)->config != -1)
/*
- * perform a read-back of any valid MFPR register to make sure the
+ * perform a read-back of any MFPR register to make sure the
* previous writings are finished
*/
-static unsigned long mfpr_off_readback;
-#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
+#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0)
static inline void __mfp_config_run(struct mfp_pin *p)
{
@@ -249,9 +248,6 @@ void __init mfp_init_addr(struct mfp_addr_map *map)
spin_lock_irqsave(&mfp_spin_lock, flags);
- /* mfp offset for readback */
- mfpr_off_readback = map[0].offset;
-
for (p = map; p->start != MFP_PIN_INVALID; p++) {
offset = p->offset;
i = p->start;
diff --git a/trunk/arch/arm/plat-s5p/Kconfig b/trunk/arch/arm/plat-s5p/Kconfig
index 557f8c507f6d..deb39951a22e 100644
--- a/trunk/arch/arm/plat-s5p/Kconfig
+++ b/trunk/arch/arm/plat-s5p/Kconfig
@@ -37,14 +37,6 @@ config S5P_GPIO_INT
help
Common code for the GPIO interrupts (other than external interrupts.)
-comment "System MMU"
-
-config S5P_SYSTEM_MMU
- bool "S5P SYSTEM MMU"
- depends on ARCH_S5PV310
- help
- Say Y here if you want to enable System MMU
-
config S5P_DEV_FIMC0
bool
help
@@ -74,3 +66,19 @@ config S5P_DEV_CSIS1
bool
help
Compile in platform device definitions for MIPI-CSIS channel 1
+
+menuconfig S5P_SYSMMU
+ bool "SYSMMU support"
+ depends on ARCH_S5PV310
+ help
+ This is a System MMU driver for Samsung ARM based Soc.
+
+if S5P_SYSMMU
+
+config S5P_SYSMMU_DEBUG
+ bool "Enables debug messages"
+ depends on S5P_SYSMMU
+ help
+ This enables SYSMMU driver debug messages.
+ This enables SYSMMU driver debug messages.
+
+endif
diff --git a/trunk/arch/arm/plat-s5p/Makefile b/trunk/arch/arm/plat-s5p/Makefile
index 4bd5cf908977..92efe1adcfd6 100644
--- a/trunk/arch/arm/plat-s5p/Makefile
+++ b/trunk/arch/arm/plat-s5p/Makefile
@@ -19,7 +19,6 @@ obj-y += clock.o
obj-y += irq.o
obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
-obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
@@ -31,3 +30,4 @@ obj-$(CONFIG_S5P_DEV_FIMC2) += dev-fimc2.o
obj-$(CONFIG_S5P_DEV_ONENAND) += dev-onenand.o
obj-$(CONFIG_S5P_DEV_CSIS0) += dev-csis0.o
obj-$(CONFIG_S5P_DEV_CSIS1) += dev-csis1.o
+obj-$(CONFIG_S5P_SYSMMU) += sysmmu.o
diff --git a/trunk/arch/arm/plat-s5p/dev-uart.c b/trunk/arch/arm/plat-s5p/dev-uart.c
index afaf87fdb93e..6a7342886171 100644
--- a/trunk/arch/arm/plat-s5p/dev-uart.c
+++ b/trunk/arch/arm/plat-s5p/dev-uart.c
@@ -28,7 +28,7 @@
static struct resource s5p_uart0_resource[] = {
[0] = {
.start = S5P_PA_UART0,
- .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART0 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
static struct resource s5p_uart1_resource[] = {
[0] = {
.start = S5P_PA_UART1,
- .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART1 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
static struct resource s5p_uart2_resource[] = {
[0] = {
.start = S5P_PA_UART2,
- .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART2 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
[0] = {
.start = S5P_PA_UART3,
- .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART3 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
[0] = {
.start = S5P_PA_UART4,
- .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART4 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
[0] = {
.start = S5P_PA_UART5,
- .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
+ .end = S5P_PA_UART5 + S5P_SZ_UART,
.flags = IORESOURCE_MEM,
},
[1] = {
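
The dev-uart.c hunk flips each UART memory resource between `.end = start + S5P_SZ_UART - 1` and `.end = start + S5P_SZ_UART`. For an inclusive end address the size works out to end - start + 1, so the two forms differ by exactly one byte. The sketch below is only that arithmetic, with no kernel types, and does not judge which side of the diff is intended.

/*
 * Illustrative sketch of the off-by-one the hunk above touches: with an
 * inclusive .end, the correct value is start + size - 1.
 */
#include <stdio.h>

#define SZ_UART 0x100UL

static unsigned long region_size(unsigned long start, unsigned long end)
{
	return end - start + 1;   /* inclusive end */
}

int main(void)
{
	unsigned long start = 0xE2900000UL;

	printf("end = start + SZ - 1 -> size 0x%lx\n",
	       region_size(start, start + SZ_UART - 1));
	printf("end = start + SZ     -> size 0x%lx\n",
	       region_size(start, start + SZ_UART));
	return 0;
}
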
diff --git a/trunk/arch/arm/plat-s5p/include/plat/sysmmu.h b/trunk/arch/arm/plat-s5p/include/plat/sysmmu.h
new file mode 100644
index 000000000000..db298fc5438a
--- /dev/null
+++ b/trunk/arch/arm/plat-s5p/include/plat/sysmmu.h
@@ -0,0 +1,23 @@
+/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung sysmmu driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_S5P_SYSMMU_H
+#define __ASM_PLAT_S5P_SYSMMU_H __FILE__
+
+/* debug macro */
+#ifdef CONFIG_S5P_SYSMMU_DEBUG
+#define sysmmu_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
+#else
+#define sysmmu_debug(fmt, arg...) do { } while (0)
+#endif
+
+#endif /* __ASM_PLAT_S5P_SYSMMU_H */
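
The new plat/sysmmu.h above introduces sysmmu_debug(), a printk wrapper that compiles away unless CONFIG_S5P_SYSMMU_DEBUG is set. Below is a standalone sketch of the same compile-time-gated debug macro pattern; the macro and the -DDEBUG_SYSMMU switch are invented for the demo and are not the kernel's names.

/*
 * Illustrative sketch: a debug macro that becomes a no-op when its
 * config switch is off. Build with -DDEBUG_SYSMMU to enable output.
 */
#include <stdio.h>

#ifdef DEBUG_SYSMMU
#define dbg(fmt, ...) printf("[%s] " fmt, __func__, ##__VA_ARGS__)
#else
#define dbg(fmt, ...) do { } while (0)   /* no-op when debugging is off */
#endif

static void set_table_base(unsigned long pg)
{
	dbg("TTBR0: 0x%lx\n", pg);   /* only emitted in debug builds */
}

int main(void)
{
	set_table_base(0x40000000UL);
	return 0;
}
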
diff --git a/trunk/arch/arm/plat-s5p/sysmmu.c b/trunk/arch/arm/plat-s5p/sysmmu.c
index ffe8a48bc3c1..d804914dc2e2 100644
--- a/trunk/arch/arm/plat-s5p/sysmmu.c
+++ b/trunk/arch/arm/plat-s5p/sysmmu.c
@@ -16,6 +16,8 @@
#include
#include
+#include
+
struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
@@ -121,7 +123,7 @@ static int s5p_sysmmu_set_tablebase(sysmmu_ips ips)
: "=r" (pg) : : "cc"); \
pg &= ~0x3fff;
- printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg);
+ sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg);
/* Set sysmmu page table base address */
__raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
diff --git a/trunk/arch/arm/plat-samsung/dev-ts.c b/trunk/arch/arm/plat-samsung/dev-ts.c
index 3e4bd8147bf4..236ef8427d7d 100644
--- a/trunk/arch/arm/plat-samsung/dev-ts.c
+++ b/trunk/arch/arm/plat-samsung/dev-ts.c
@@ -58,3 +58,4 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
s3c_device_ts.dev.platform_data = npd;
}
+EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/trunk/arch/arm/plat-samsung/dev-uart.c b/trunk/arch/arm/plat-samsung/dev-uart.c
index 5928105490fa..3776cd952450 100644
--- a/trunk/arch/arm/plat-samsung/dev-uart.c
+++ b/trunk/arch/arm/plat-samsung/dev-uart.c
@@ -15,8 +15,6 @@
#include
#include
-#include
-
/* uart devices */
static struct platform_device s3c24xx_uart_device0 = {
diff --git a/trunk/arch/arm/plat-samsung/include/plat/pm.h b/trunk/arch/arm/plat-samsung/include/plat/pm.h
index 30518cc9a67c..d9025e377675 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/pm.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/pm.h
@@ -17,8 +17,6 @@
#include
-struct sys_device;
-
#ifdef CONFIG_PM
extern __init int s3c_pm_init(void);
diff --git a/trunk/arch/arm/plat-spear/include/plat/uncompress.h b/trunk/arch/arm/plat-spear/include/plat/uncompress.h
index 6dd455bafdfd..99ba6789cc97 100644
--- a/trunk/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/trunk/arch/arm/plat-spear/include/plat/uncompress.h
@@ -24,10 +24,10 @@ static inline void putc(int c)
{
void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
- while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
+ while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
barrier();
- writel_relaxed(c, base + UART01x_DR);
+ writel(c, base + UART01x_DR);
}
static inline void flush(void)
diff --git a/trunk/arch/arm/plat-spear/include/plat/vmalloc.h b/trunk/arch/arm/plat-spear/include/plat/vmalloc.h
index 8c8b24d07046..09e9372aea21 100644
--- a/trunk/arch/arm/plat-spear/include/plat/vmalloc.h
+++ b/trunk/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -14,6 +14,6 @@
#ifndef __PLAT_VMALLOC_H
#define __PLAT_VMALLOC_H
-#define VMALLOC_END 0xF0000000UL
+#define VMALLOC_END 0xF0000000
#endif /* __PLAT_VMALLOC_H */
diff --git a/trunk/arch/blackfin/include/asm/bfin_serial.h b/trunk/arch/blackfin/include/asm/bfin_serial.h
index 7dbc664eab1e..1ff9f1468c02 100644
--- a/trunk/arch/blackfin/include/asm/bfin_serial.h
+++ b/trunk/arch/blackfin/include/asm/bfin_serial.h
@@ -10,7 +10,6 @@
#define __BFIN_ASM_SERIAL_H__
#include
-#include
#include
#include
@@ -42,7 +41,6 @@ struct bfin_serial_port {
struct circ_buf rx_dma_buf;
struct timer_list rx_dma_timer;
int rx_dma_nrows;
- spinlock_t rx_lock;
unsigned int tx_dma_channel;
unsigned int rx_dma_channel;
struct work_struct tx_dma_workqueue;
diff --git a/trunk/arch/blackfin/kernel/time.c b/trunk/arch/blackfin/kernel/time.c
index 8d73724c0092..c9113619029f 100644
--- a/trunk/arch/blackfin/kernel/time.c
+++ b/trunk/arch/blackfin/kernel/time.c
@@ -114,14 +114,16 @@ u32 arch_gettimeoffset(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
- xtime_update(1);
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+ write_sequnlock(&xtime_lock);
#ifdef CONFIG_IPIPE
update_root_process_times(get_irq_regs());
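
This and several later hunks replace xtime_update(1) with an explicit write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock) sequence: the tick update is guarded by a sequence lock so that readers can detect a concurrent update and retry instead of blocking. Below is a simplified, single-threaded userspace sketch of that seqlock protocol, with invented names and no memory barriers shown.

/*
 * Illustrative sketch of the seqlock idea: the writer bumps a sequence
 * counter to an odd value around its update; readers retry if they saw
 * an odd or changed sequence.
 */
#include <stdio.h>

static unsigned seq;
static unsigned long jiffies_copy;

static void tick_write(void)
{
	seq++;               /* odd: update in progress */
	jiffies_copy++;      /* the protected update (do_timer(1) analogue) */
	seq++;               /* even again: update complete */
}

static unsigned long tick_read(void)
{
	unsigned start;
	unsigned long val;

	do {
		start = seq;
		val = jiffies_copy;
	} while ((start & 1) || seq != start);   /* retry on torn read */

	return val;
}

int main(void)
{
	tick_write();
	tick_write();
	printf("jiffies copy = %lu\n", tick_read());
	return 0;
}
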
diff --git a/trunk/arch/blackfin/lib/outs.S b/trunk/arch/blackfin/lib/outs.S
index 06a5e674401f..250f4d4b9436 100644
--- a/trunk/arch/blackfin/lib/outs.S
+++ b/trunk/arch/blackfin/lib/outs.S
@@ -13,8 +13,6 @@
.align 2
ENTRY(_outsl)
- CC = R2 == 0;
- IF CC JUMP 1f;
P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
@@ -22,12 +20,10 @@ ENTRY(_outsl)
LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
.Llong_loop_s: R0 = [P1++];
.Llong_loop_e: [P0] = R0;
-1: RTS;
+ RTS;
ENDPROC(_outsl)
ENTRY(_outsw)
- CC = R2 == 0;
- IF CC JUMP 1f;
P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
@@ -35,12 +31,10 @@ ENTRY(_outsw)
LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
.Lword_loop_s: R0 = W[P1++];
.Lword_loop_e: W[P0] = R0;
-1: RTS;
+ RTS;
ENDPROC(_outsw)
ENTRY(_outsb)
- CC = R2 == 0;
- IF CC JUMP 1f;
P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
@@ -48,12 +42,10 @@ ENTRY(_outsb)
LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
.Lbyte_loop_s: R0 = B[P1++];
.Lbyte_loop_e: B[P0] = R0;
-1: RTS;
+ RTS;
ENDPROC(_outsb)
ENTRY(_outsw_8)
- CC = R2 == 0;
- IF CC JUMP 1f;
P0 = R0; /* P0 = port */
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
@@ -64,5 +56,5 @@ ENTRY(_outsw_8)
R0 = R0 << 8;
R0 = R0 + R1;
.Lword8_loop_e: W[P0] = R0;
-1: RTS;
+ RTS;
ENDPROC(_outsw_8)
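
The outs.S hunk removes a `CC = R2 == 0; IF CC JUMP 1f;` guard from loops whose body otherwise runs at least once. The C analogue below shows why such a guard matters for do/while-shaped copy loops; it only illustrates the control flow and makes no further claim about Blackfin LSETUP behaviour.

/*
 * Illustrative sketch: a do/while-style copy needs an explicit early
 * return for count == 0, while a for/while loop does not.
 */
#include <stdio.h>
#include <stddef.h>

static void outs_do_while(unsigned char *port, const unsigned char *buf,
			  size_t count)
{
	if (!count)
		return;          /* the guard the hunk removes/restores */
	do {
		*port = *buf++;  /* body always runs at least once */
	} while (--count);
}

int main(void)
{
	unsigned char port = 0;
	const unsigned char data[] = { 0xAA, 0xBB };

	outs_do_while(&port, data, sizeof(data));
	printf("last byte written: 0x%02X\n", port);

	outs_do_while(&port, data, 0);   /* safe: guard short-circuits */
	printf("after zero-length:  0x%02X\n", port);
	return 0;
}
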
diff --git a/trunk/arch/blackfin/mach-common/cache.S b/trunk/arch/blackfin/mach-common/cache.S
index ab4a925a443e..790c767ca95a 100644
--- a/trunk/arch/blackfin/mach-common/cache.S
+++ b/trunk/arch/blackfin/mach-common/cache.S
@@ -58,8 +58,6 @@
1:
.ifeqs "\flushins", BROK_FLUSH_INST
\flushins [P0++];
- nop;
- nop;
2: nop;
.else
2: \flushins [P0++];
diff --git a/trunk/arch/cris/arch-v10/kernel/time.c b/trunk/arch/cris/arch-v10/kernel/time.c
index 20c85b5dc7d0..00eb36f8debf 100644
--- a/trunk/arch/cris/arch-v10/kernel/time.c
+++ b/trunk/arch/cris/arch-v10/kernel/time.c
@@ -140,7 +140,7 @@ stop_watchdog(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
//static unsigned short myjiff; /* used by our debug routine print_timestamp */
@@ -176,7 +176,7 @@ timer_interrupt(int irq, void *dev_id)
/* call the real timer interrupt handler */
- xtime_update(1);
+ do_timer(1);
cris_do_profile(regs); /* Save profiling information */
return IRQ_HANDLED;
diff --git a/trunk/arch/cris/arch-v32/kernel/smp.c b/trunk/arch/cris/arch-v32/kernel/smp.c
index 4c9e3e1ba5d1..84fed3b4b079 100644
--- a/trunk/arch/cris/arch-v32/kernel/smp.c
+++ b/trunk/arch/cris/arch-v32/kernel/smp.c
@@ -26,9 +26,7 @@
#define FLUSH_ALL (void*)0xffffffff
/* Vector of locks used for various atomic operations */
-spinlock_t cris_atomic_locks[] = {
- [0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
-};
+spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
diff --git a/trunk/arch/cris/arch-v32/kernel/time.c b/trunk/arch/cris/arch-v32/kernel/time.c
index bb978ede8985..a545211e999d 100644
--- a/trunk/arch/cris/arch-v32/kernel/time.c
+++ b/trunk/arch/cris/arch-v32/kernel/time.c
@@ -183,7 +183,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick.
+ * as well as call the "do_timer()" routine every clocktick.
*/
extern void cris_do_profile(struct pt_regs *regs);
@@ -216,7 +216,9 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
/* Call the real timer interrupt handler */
- xtime_update(1);
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+ write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
}
diff --git a/trunk/arch/cris/kernel/vmlinux.lds.S b/trunk/arch/cris/kernel/vmlinux.lds.S
index c49be845f96a..442218980db0 100644
--- a/trunk/arch/cris/kernel/vmlinux.lds.S
+++ b/trunk/arch/cris/kernel/vmlinux.lds.S
@@ -72,6 +72,11 @@ SECTIONS
INIT_TEXT_SECTION(PAGE_SIZE)
.init.data : { INIT_DATA }
.init.setup : { INIT_SETUP(16) }
+#ifdef CONFIG_ETRAX_ARCH_V32
+ __start___param = .;
+ __param : { *(__param) }
+ __stop___param = .;
+#endif
.initcall.init : {
INIT_CALLS
}
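
The cris linker-script hunk brackets the __param section with __start___param/__stop___param so built-in module parameters can be walked at runtime. The same start/stop-symbol technique can be tried standalone: with GNU ld, a section whose name is a valid C identifier gets __start_/__stop_ symbols generated automatically. The section and symbol names below are invented for the demo and are not the kernel's.

/*
 * Illustrative sketch: entries placed in a named section are walked at
 * runtime between linker-provided boundary symbols.
 */
#include <stdio.h>

struct param { const char *name; int value; };

#define DEFINE_PARAM(n, v) \
	static const struct param n \
	__attribute__((used, section("params_demo"))) = { #n, v }

DEFINE_PARAM(verbose, 1);
DEFINE_PARAM(retries, 3);

/* Generated by GNU ld for the orphan "params_demo" section. */
extern const struct param __start_params_demo[], __stop_params_demo[];

int main(void)
{
	for (const struct param *p = __start_params_demo;
	     p < __stop_params_demo; p++)
		printf("%s = %d\n", p->name, p->value);
	return 0;
}
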
diff --git a/trunk/arch/frv/include/asm/futex.h b/trunk/arch/frv/include/asm/futex.h
index 4bea27f50a7a..08b3d1da3583 100644
--- a/trunk/arch/frv/include/asm/futex.h
+++ b/trunk/arch/frv/include/asm/futex.h
@@ -7,11 +7,10 @@
#include
#include
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
return -ENOSYS;
}
diff --git a/trunk/arch/frv/kernel/futex.c b/trunk/arch/frv/kernel/futex.c
index d155ca9e5098..14f64b054c7e 100644
--- a/trunk/arch/frv/kernel/futex.c
+++ b/trunk/arch/frv/kernel/futex.c
@@ -18,7 +18,7 @@
* the various futex operations; MMU fault checking is ignored under no-MMU
* conditions
*/
-static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
{
int oldval, ret;
@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_o
return ret;
}
-static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
{
int oldval, ret;
@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_o
return ret;
}
-static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
{
int oldval, ret;
@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_ol
return ret;
}
-static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
{
int oldval, ret;
@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_o
return ret;
}
-static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
{
int oldval, ret;
@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
/*
* do the futex operations
*/
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
diff --git a/trunk/arch/frv/kernel/time.c b/trunk/arch/frv/kernel/time.c
index b457de496b70..0ddbbae83cb2 100644
--- a/trunk/arch/frv/kernel/time.c
+++ b/trunk/arch/frv/kernel/time.c
@@ -50,13 +50,21 @@ static struct irqaction timer_irq = {
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
profile_tick(CPU_PROFILING);
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid to SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because as just said we have irq
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
- xtime_update(1);
+ do_timer(1);
#ifdef CONFIG_HEARTBEAT
static unsigned short n;
@@ -64,6 +72,8 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */
+ write_sequnlock(&xtime_lock);
+
update_process_times(user_mode(get_irq_regs()));
return IRQ_HANDLED;
diff --git a/trunk/arch/h8300/kernel/time.c b/trunk/arch/h8300/kernel/time.c
index 32263a138aa6..165005aff9df 100644
--- a/trunk/arch/h8300/kernel/time.c
+++ b/trunk/arch/h8300/kernel/time.c
@@ -35,7 +35,9 @@ void h8300_timer_tick(void)
{
if (current->pid)
profile_tick(CPU_PROFILING);
- xtime_update(1);
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+ write_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
}
diff --git a/trunk/arch/h8300/kernel/timer/timer8.c b/trunk/arch/h8300/kernel/timer/timer8.c
index 7a1533fad47d..3946c0fa8374 100644
--- a/trunk/arch/h8300/kernel/timer/timer8.c
+++ b/trunk/arch/h8300/kernel/timer/timer8.c
@@ -61,7 +61,7 @@
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dev_id)
diff --git a/trunk/arch/ia64/include/asm/futex.h b/trunk/arch/ia64/include/asm/futex.h
index 8428525ddb22..c7f0f062239c 100644
--- a/trunk/arch/ia64/include/asm/futex.h
+++ b/trunk/arch/ia64/include/asm/futex.h
@@ -46,7 +46,7 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -100,26 +100,23 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
{
- register unsigned long r8 __asm ("r8") = 0;
- unsigned long prev;
+ register unsigned long r8 __asm ("r8");
__asm__ __volatile__(
" mf;; \n"
" mov ar.ccv=%3;; \n"
"[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (prev)
+ : "=r" (r8)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
- *uval = prev;
return r8;
}
}
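
The ia64 futex hunk above switches futex_atomic_cmpxchg_inatomic() between two conventions: returning the previous word through a *uval out parameter alongside a separate error code, versus returning the register value directly. Below is a userspace analogue of the out-parameter style, built on a plain GCC atomic builtin rather than the kernel helpers; the names are invented, and a real helper would also report -EFAULT on a faulting access.

/*
 * Illustrative sketch: compare-and-exchange with the previous value
 * reported through an out parameter.
 */
#include <stdio.h>
#include <stdint.h>

static int cmpxchg_uval(uint32_t *uval, uint32_t *uaddr,
			uint32_t oldval, uint32_t newval)
{
	uint32_t expected = oldval;

	__atomic_compare_exchange_n(uaddr, &expected, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	*uval = expected;    /* value found at *uaddr before the attempt */
	return 0;            /* a real helper would return -EFAULT on fault */
}

int main(void)
{
	uint32_t word = 1, prev;

	cmpxchg_uval(&prev, &word, 1, 2);
	printf("prev=%u now=%u\n", prev, word);  /* prev=1 now=2 */
	return 0;
}
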
diff --git a/trunk/arch/ia64/include/asm/rwsem.h b/trunk/arch/ia64/include/asm/rwsem.h
index 3027e7516d85..215d5454c7d3 100644
--- a/trunk/arch/ia64/include/asm/rwsem.h
+++ b/trunk/arch/ia64/include/asm/rwsem.h
@@ -25,8 +25,20 @@
#error "Please don't include directly, use instead."
#endif
+#include
+#include
+
#include
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS (1L)
#define RWSEM_ACTIVE_MASK (0xffffffffL)
@@ -34,6 +46,26 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void
+init_rwsem (struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
/*
* lock for reading
*/
@@ -142,4 +174,9 @@ __downgrade_write (struct rw_semaphore *sem)
#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* _ASM_IA64_RWSEM_H */
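
The rwsem hunk restores the per-arch struct rw_semaphore along with rwsem_is_locked(), which simply tests count != 0. That test works because of how the count is biased: each reader adds ACTIVE_BIAS and a writer adds WAITING_BIAS + ACTIVE_BIAS. The sketch below only walks that arithmetic; the WAITING_BIAS value of -2^32 is an assumption for the demonstration, since the hunk does not show it.

/*
 * Illustrative sketch of the rwsem count encoding. RWSEM_WAITING_BIAS
 * below is assumed (-2^32), not taken from the hunk.
 */
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE     0LL
#define RWSEM_ACTIVE_BIAS        1LL
#define RWSEM_WAITING_BIAS       (-0x100000000LL)   /* assumed for the demo */
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;            /* down_read() fast path */
	count += RWSEM_ACTIVE_READ_BIAS;            /* a second reader */
	printf("two readers: count=%lld locked=%d\n", count, count != 0);

	count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS; /* down_write() */
	printf("one writer:  count=%lld locked=%d\n", count, count != 0);
	return 0;
}
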
diff --git a/trunk/arch/ia64/include/asm/xen/hypercall.h b/trunk/arch/ia64/include/asm/xen/hypercall.h
index ed28bcd5bb85..96fc62366aa4 100644
--- a/trunk/arch/ia64/include/asm/xen/hypercall.h
+++ b/trunk/arch/ia64/include/asm/xen/hypercall.h
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
static inline int
xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
{
- return _hypercall2(int, sched_op, cmd, arg);
+ return _hypercall2(int, sched_op_new, cmd, arg);
}
static inline long
diff --git a/trunk/arch/ia64/kernel/time.c b/trunk/arch/ia64/kernel/time.c
index 156ad803d5b7..9702fa92489e 100644
--- a/trunk/arch/ia64/kernel/time.c
+++ b/trunk/arch/ia64/kernel/time.c
@@ -190,10 +190,19 @@ timer_interrupt (int irq, void *dev_id)
new_itm += local_cpu_data->itm_delta;
- if (smp_processor_id() == time_keeper_id)
- xtime_update(1);
-
- local_cpu_data->itm_next = new_itm;
+ if (smp_processor_id() == time_keeper_id) {
+ /*
+ * Here we are in the timer irq handler. We have irqs locally
+ * disabled, but we don't know if the timer_bh is running on
+ * another CPU. We need to avoid to SMP race by acquiring the
+ * xtime_lock.
+ */
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+ local_cpu_data->itm_next = new_itm;
+ write_sequnlock(&xtime_lock);
+ } else
+ local_cpu_data->itm_next = new_itm;
if (time_after(new_itm, ia64_get_itc()))
break;
@@ -213,7 +222,7 @@ timer_interrupt (int irq, void *dev_id)
* comfort, we increase the safety margin by
* intentionally dropping the next tick(s). We do NOT
* update itm.next because that would force us to call
- * xtime_update() which in turn would let our clock run
+ * do_timer() which in turn would let our clock run
* too fast (with the potentially devastating effect
* of losing monotony of time).
*/
diff --git a/trunk/arch/ia64/xen/suspend.c b/trunk/arch/ia64/xen/suspend.c
index 419c8620945a..fd66b048c6fa 100644
--- a/trunk/arch/ia64/xen/suspend.c
+++ b/trunk/arch/ia64/xen/suspend.c
@@ -37,14 +37,19 @@ xen_mm_unpin_all(void)
/* nothing */
}
+void xen_pre_device_suspend(void)
+{
+ /* nothing */
+}
+
void
-xen_arch_pre_suspend()
+xen_pre_suspend()
{
/* nothing */
}
void
-xen_arch_post_suspend(int suspend_cancelled)
+xen_post_suspend(int suspend_cancelled)
{
if (suspend_cancelled)
return;
diff --git a/trunk/arch/ia64/xen/time.c b/trunk/arch/ia64/xen/time.c
index 1f8244a78bee..c1c544513e8d 100644
--- a/trunk/arch/ia64/xen/time.c
+++ b/trunk/arch/ia64/xen/time.c
@@ -139,11 +139,14 @@ consider_steal_time(unsigned long new_itm)
run_posix_cpu_timers(p);
delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
- if (cpu == time_keeper_id)
- xtime_update(stolen + blocked);
-
- local_cpu_data->itm_next = delta_itm + new_itm;
-
+ if (cpu == time_keeper_id) {
+ write_seqlock(&xtime_lock);
+ do_timer(stolen + blocked);
+ local_cpu_data->itm_next = delta_itm + new_itm;
+ write_sequnlock(&xtime_lock);
+ } else {
+ local_cpu_data->itm_next = delta_itm + new_itm;
+ }
per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
diff --git a/trunk/arch/m32r/kernel/time.c b/trunk/arch/m32r/kernel/time.c
index 84dd04048db9..bda86820bffd 100644
--- a/trunk/arch/m32r/kernel/time.c
+++ b/trunk/arch/m32r/kernel/time.c
@@ -107,14 +107,15 @@ u32 arch_gettimeoffset(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
- xtime_update(1);
+ /* XXX FIXME. Uh, the xtime_lock should be held here, no? */
+ do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
diff --git a/trunk/arch/m68k/bvme6000/config.c b/trunk/arch/m68k/bvme6000/config.c
index 1edd95095cb4..9fe6fefb5e14 100644
--- a/trunk/arch/m68k/bvme6000/config.c
+++ b/trunk/arch/m68k/bvme6000/config.c
@@ -45,8 +45,8 @@ extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via bvme6000_process_int() */
static irq_handler_t tick_handler;
diff --git a/trunk/arch/m68k/include/asm/string.h b/trunk/arch/m68k/include/asm/string.h
index 32198454da70..65b131282837 100644
--- a/trunk/arch/m68k/include/asm/string.h
+++ b/trunk/arch/m68k/include/asm/string.h
@@ -99,12 +99,14 @@ static inline int strcmp(const char *cs, const char *ct)
: "+a" (cs), "+a" (ct), "=d" (res));
return res;
}
-#endif /* CONFIG_COLDFIRE */
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, __kernel_size_t);
#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
+#endif /* CONFIG_COLDFIRE */
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
diff --git a/trunk/arch/m68k/kernel/time.c b/trunk/arch/m68k/kernel/time.c
index 18b34ee5db3b..06438dac08ff 100644
--- a/trunk/arch/m68k/kernel/time.c
+++ b/trunk/arch/m68k/kernel/time.c
@@ -37,11 +37,11 @@ static inline int set_rtc_mmss(unsigned long nowtime)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
- xtime_update(1);
+ do_timer(1);
update_process_times(user_mode(get_irq_regs()));
profile_tick(CPU_PROFILING);
diff --git a/trunk/arch/m68k/lib/string.c b/trunk/arch/m68k/lib/string.c
index d399c5f25636..4253f870e54f 100644
--- a/trunk/arch/m68k/lib/string.c
+++ b/trunk/arch/m68k/lib/string.c
@@ -243,3 +243,14 @@ void *memmove(void *dest, const void *src, size_t n)
return xdest;
}
EXPORT_SYMBOL(memmove);
+
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+
+ for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
+ if (*su1 != *su2)
+ return *su1 < *su2 ? -1 : +1;
+ return 0;
+}
+EXPORT_SYMBOL(memcmp);
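
The lib/string.c hunk adds a byte-wise memcmp() that compares as unsigned char and returns -1/0/+1 at the first difference. A standalone demonstration of those semantics follows; the helper mirrors the added implementation but is a local userspace copy, not the kernel symbol itself.

/*
 * Illustrative sketch: memcmp semantics -- unsigned byte comparison,
 * sign of the first difference as the result.
 */
#include <stdio.h>
#include <stddef.h>

static int my_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1 = cs, *su2 = ct;

	for (; count > 0; ++su1, ++su2, count--)
		if (*su1 != *su2)
			return *su1 < *su2 ? -1 : +1;
	return 0;
}

int main(void)
{
	printf("%d\n", my_memcmp("abc", "abd", 3));       /* -1: 'c' < 'd' */
	printf("%d\n", my_memcmp("abc", "abc", 3));       /*  0: equal     */
	printf("%d\n", my_memcmp("ab\xff", "ab\x01", 3)); /* +1: unsigned  */
	return 0;
}
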
diff --git a/trunk/arch/m68k/mvme147/config.c b/trunk/arch/m68k/mvme147/config.c
index 6cb9c3a9b6c9..100baaa692a1 100644
--- a/trunk/arch/m68k/mvme147/config.c
+++ b/trunk/arch/m68k/mvme147/config.c
@@ -46,8 +46,8 @@ extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme147_process_int() */
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via mvme147_process_int() */
irq_handler_t tick_handler;
diff --git a/trunk/arch/m68k/mvme16x/config.c b/trunk/arch/m68k/mvme16x/config.c
index 0b28e2621653..11edf61cc2c4 100644
--- a/trunk/arch/m68k/mvme16x/config.c
+++ b/trunk/arch/m68k/mvme16x/config.c
@@ -51,8 +51,8 @@ extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme16x_process_int() */
+/* Save tick handler routine pointer, will point to do_timer() in
+ * kernel/sched.c, called via mvme16x_process_int() */
static irq_handler_t tick_handler;
diff --git a/trunk/arch/m68k/sun3/sun3ints.c b/trunk/arch/m68k/sun3/sun3ints.c
index 6464ad3ae3e6..2d9e21bd313a 100644
--- a/trunk/arch/m68k/sun3/sun3ints.c
+++ b/trunk/arch/m68k/sun3/sun3ints.c
@@ -66,7 +66,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
#ifdef CONFIG_SUN3
intersil_clear();
#endif
- xtime_update(1);
+ do_timer(1);
update_process_times(user_mode(get_irq_regs()));
if (!(kstat_cpu(0).irqs[irq] % 20))
sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
diff --git a/trunk/arch/m68knommu/kernel/time.c b/trunk/arch/m68knommu/kernel/time.c
index 6623909f70e6..d6ac2a43453c 100644
--- a/trunk/arch/m68knommu/kernel/time.c
+++ b/trunk/arch/m68knommu/kernel/time.c
@@ -36,7 +36,7 @@ static inline int set_rtc_mmss(unsigned long nowtime)
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
irqreturn_t arch_timer_interrupt(int irq, void *dummy)
{
@@ -44,7 +44,11 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
if (current->pid)
profile_tick(CPU_PROFILING);
- xtime_update(1);
+ write_seqlock(&xtime_lock);
+
+ do_timer(1);
+
+ write_sequnlock(&xtime_lock);
update_process_times(user_mode(get_irq_regs()));
diff --git a/trunk/arch/m68knommu/kernel/vmlinux.lds.S b/trunk/arch/m68knommu/kernel/vmlinux.lds.S
index 47e15ebfd893..ef332136f96d 100644
--- a/trunk/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/trunk/arch/m68knommu/kernel/vmlinux.lds.S
@@ -141,12 +141,6 @@ SECTIONS {
*(__param)
__stop___param = .;
- /* Built-in module versions */
- . = ALIGN(4) ;
- __start___modver = .;
- *(__modver)
- __stop___modver = .;
-
. = ALIGN(4) ;
_etext = . ;
} > TEXT
diff --git a/trunk/arch/m68knommu/lib/Makefile b/trunk/arch/m68knommu/lib/Makefile
index 32d852e586d7..d94d709665aa 100644
--- a/trunk/arch/m68knommu/lib/Makefile
+++ b/trunk/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
- checksum.o memcpy.o memmove.o memset.o delay.o
+ checksum.o memcpy.o memset.o delay.o
diff --git a/trunk/arch/m68knommu/lib/memmove.c b/trunk/arch/m68knommu/lib/memmove.c
deleted file mode 100644
index b3dcfe9dab7e..000000000000
--- a/trunk/arch/m68knommu/lib/memmove.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#define __IN_STRING_C
-
-#include
-#include
-
-void *memmove(void *dest, const void *src, size_t n)
-{
- void *xdest = dest;
- size_t temp;
-
- if (!n)
- return xdest;
-
- if (dest < src) {
- if ((long)dest & 1) {
- char *cdest = dest;
- const char *csrc = src;
- *cdest++ = *csrc++;
- dest = cdest;
- src = csrc;
- n--;
- }
- if (n > 2 && (long)dest & 2) {
- short *sdest = dest;
- const short *ssrc = src;
- *sdest++ = *ssrc++;
- dest = sdest;
- src = ssrc;
- n -= 2;
- }
- temp = n >> 2;
- if (temp) {
- long *ldest = dest;
- const long *lsrc = src;
- temp--;
- do
- *ldest++ = *lsrc++;
- while (temp--);
- dest = ldest;
- src = lsrc;
- }
- if (n & 2) {
- short *sdest = dest;
- const short *ssrc = src;
- *sdest++ = *ssrc++;
- dest = sdest;
- src = ssrc;
- }
- if (n & 1) {
- char *cdest = dest;
- const char *csrc = src;
- *cdest = *csrc;
- }
- } else {
- dest = (char *)dest + n;
- src = (const char *)src + n;
- if ((long)dest & 1) {
- char *cdest = dest;
- const char *csrc = src;
- *--cdest = *--csrc;
- dest = cdest;
- src = csrc;
- n--;
- }
- if (n > 2 && (long)dest & 2) {
- short *sdest = dest;
- const short *ssrc = src;
- *--sdest = *--ssrc;
- dest = sdest;
- src = ssrc;
- n -= 2;
- }
- temp = n >> 2;
- if (temp) {
- long *ldest = dest;
- const long *lsrc = src;
- temp--;
- do
- *--ldest = *--lsrc;
- while (temp--);
- dest = ldest;
- src = lsrc;
- }
- if (n & 2) {
- short *sdest = dest;
- const short *ssrc = src;
- *--sdest = *--ssrc;
- dest = sdest;
- src = ssrc;
- }
- if (n & 1) {
- char *cdest = dest;
- const char *csrc = src;
- *--cdest = *--csrc;
- }
- }
- return xdest;
-}
-EXPORT_SYMBOL(memmove);
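
Editor's note: the file removed above is an alignment-optimized m68k memmove. For reference, a minimal portable sketch with the same overlap-safe semantics (byte-wise, no word/halfword alignment tricks) could look like the following; this is an illustration, not the kernel's implementation, and memmove_sketch is a made-up name.

#include <stddef.h>
#include <stdio.h>

/* Overlap-safe copy: copy forward when dest is below src, backward otherwise. */
static void *memmove_sketch(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d < s) {
		while (n--)
			*d++ = *s++;
	} else {
		d += n;
		s += n;
		while (n--)
			*--d = *--s;
	}
	return dest;
}

int main(void)
{
	char buf[] = "abcdef";

	/* Overlapping move: shift "abcde" one byte to the right. */
	memmove_sketch(buf + 1, buf, 5);
	printf("%s\n", buf);	/* prints "aabcde" */
	return 0;
}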
diff --git a/trunk/arch/m68knommu/platform/5249/intc2.c b/trunk/arch/m68knommu/platform/5249/intc2.c
index c5151f846591..d09d9da04537 100644
--- a/trunk/arch/m68knommu/platform/5249/intc2.c
+++ b/trunk/arch/m68knommu/platform/5249/intc2.c
@@ -50,10 +50,8 @@ static int __init mcf_intc2_init(void)
int irq;
/* GPIO interrupt sources */
- for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
+ for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
irq_desc[irq].chip = &intc2_irq_gpio_chip;
- set_irq_handler(irq, handle_edge_irq);
- }
return 0;
}
diff --git a/trunk/arch/m68knommu/platform/68328/entry.S b/trunk/arch/m68knommu/platform/68328/entry.S
index 676960cf022a..240a7a6e25c8 100644
--- a/trunk/arch/m68knommu/platform/68328/entry.S
+++ b/trunk/arch/m68knommu/platform/68328/entry.S
@@ -108,6 +108,7 @@ Luser_return:
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
+ andl #_TIF_WORK_MASK,%d1
jne Lwork_to_do
RESTORE_ALL
diff --git a/trunk/arch/m68knommu/platform/68360/commproc.c b/trunk/arch/m68knommu/platform/68360/commproc.c
index 8e4e10cc0080..f27e688c404e 100644
--- a/trunk/arch/m68knommu/platform/68360/commproc.c
+++ b/trunk/arch/m68knommu/platform/68360/commproc.c
@@ -210,7 +210,7 @@ void
cpm_install_handler(int vec, void (*handler)(), void *dev_id)
{
- request_irq(vec, handler, 0, "timer", dev_id);
+ request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id);
/* if (cpm_vecs[vec].handler != 0) */
/* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
diff --git a/trunk/arch/m68knommu/platform/68360/config.c b/trunk/arch/m68knommu/platform/68360/config.c
index 9dd5bca38749..ac629fa30099 100644
--- a/trunk/arch/m68knommu/platform/68360/config.c
+++ b/trunk/arch/m68knommu/platform/68360/config.c
@@ -75,7 +75,7 @@ void hw_timer_init(void)
/* Set compare register 32Khz / 32 / 10 = 100 */
TCMP = 10;
- request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
+ request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
#endif
/* General purpose quicc timers: MC68360UM p7-20 */
diff --git a/trunk/arch/m68knommu/platform/68360/entry.S b/trunk/arch/m68knommu/platform/68360/entry.S
index 46c1b18c9dcb..8a28788c0eea 100644
--- a/trunk/arch/m68knommu/platform/68360/entry.S
+++ b/trunk/arch/m68knommu/platform/68360/entry.S
@@ -104,6 +104,7 @@ Luser_return:
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
+ andl #_TIF_WORK_MASK,%d1
jne Lwork_to_do
RESTORE_ALL
diff --git a/trunk/arch/m68knommu/platform/68360/ints.c b/trunk/arch/m68knommu/platform/68360/ints.c
index a29041c1a8a0..ad96ab1051f0 100644
--- a/trunk/arch/m68knommu/platform/68360/ints.c
+++ b/trunk/arch/m68knommu/platform/68360/ints.c
@@ -132,8 +132,8 @@ void init_IRQ(void)
pquicc->intr_cimr = 0x00000000;
for (i = 0; (i < NR_IRQS); i++) {
- set_irq_chip(i, &intc_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ set_irq_chip(irq, &intc_irq_chip);
+ set_irq_handler(irq, handle_level_irq);
}
}
diff --git a/trunk/arch/m68knommu/platform/coldfire/entry.S b/trunk/arch/m68knommu/platform/coldfire/entry.S
index 5837cf080b6d..4ddfc3da70d8 100644
--- a/trunk/arch/m68knommu/platform/coldfire/entry.S
+++ b/trunk/arch/m68knommu/platform/coldfire/entry.S
@@ -138,6 +138,7 @@ Luser_return:
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
+ andl #0xefff,%d1
jne Lwork_to_do /* still work to do */
Lreturn:
diff --git a/trunk/arch/microblaze/include/asm/futex.h b/trunk/arch/microblaze/include/asm/futex.h
index b0526d2716fa..ad3fd61b2fe7 100644
--- a/trunk/arch/microblaze/include/asm/futex.h
+++ b/trunk/arch/microblaze/include/asm/futex.h
@@ -29,7 +29,7 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -94,34 +94,31 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0, cmp;
- u32 prev;
+ int prev, cmp;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- __asm__ __volatile__ ("1: lwx %1, %3, r0; \
- cmp %2, %1, %4; \
- beqi %2, 3f; \
- 2: swx %5, %3, r0; \
- addic %2, r0, 0; \
- bnei %2, 1b; \
+ __asm__ __volatile__ ("1: lwx %0, %2, r0; \
+ cmp %1, %0, %3; \
+ beqi %1, 3f; \
+ 2: swx %4, %2, r0; \
+ addic %1, r0, 0; \
+ bnei %1, 1b; \
3: \
.section .fixup,\"ax\"; \
4: brid 3b; \
- addik %0, r0, %6; \
+ addik %0, r0, %5; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \
.previous;" \
- : "+r" (ret), "=&r" (prev), "=&r"(cmp) \
+ : "=&r" (prev), "=&r"(cmp) \
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
- *uval = prev;
- return ret;
+ return prev;
}
#endif /* __KERNEL__ */
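
Editor's note: the hunk above reverts futex_atomic_cmpxchg_inatomic() to the older calling convention: instead of returning an error code and handing the previous value back through a *uval pointer, it returns the previous value directly (or -EFAULT). A minimal userspace sketch of that convention, using a plain non-atomic compare-and-exchange as a stand-in for the load-linked/store-conditional assembly (cmpxchg_inatomic_sketch and the NULL check are illustrative only):

#include <errno.h>
#include <stdio.h>

/*
 * Sketch of the reverted convention: return the value previously stored at
 * *uaddr, or -EFAULT on a faulting access. The real code does this with an
 * atomic lwx/swx (or ll/sc) sequence; this stand-in is not atomic.
 */
static int cmpxchg_inatomic_sketch(int *uaddr, int oldval, int newval)
{
	int prev;

	if (uaddr == NULL)		/* stand-in for the access_ok() check */
		return -EFAULT;

	prev = *uaddr;
	if (prev == oldval)
		*uaddr = newval;
	return prev;
}

int main(void)
{
	int futex_word = 0;

	/* Caller treats "returned value == oldval" as success. */
	if (cmpxchg_inatomic_sketch(&futex_word, 0, 1) == 0)
		printf("acquired, futex_word is now %d\n", futex_word);

	/* Second attempt with a stale oldval fails; the word is unchanged. */
	if (cmpxchg_inatomic_sketch(&futex_word, 0, 2) != 0)
		printf("lost the race, futex_word is still %d\n", futex_word);
	return 0;
}

Note that under this convention a stored value equal to -EFAULT is indistinguishable from a fault, which is presumably one motivation for the separate-error-code interface this hunk reverts away from.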
diff --git a/trunk/arch/microblaze/include/asm/irqflags.h b/trunk/arch/microblaze/include/asm/irqflags.h
index c4532f032b3b..5fd31905775d 100644
--- a/trunk/arch/microblaze/include/asm/irqflags.h
+++ b/trunk/arch/microblaze/include/asm/irqflags.h
@@ -12,7 +12,7 @@
#include
#include
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
static inline unsigned long arch_local_irq_save(void)
{
diff --git a/trunk/arch/microblaze/include/asm/pgtable.h b/trunk/arch/microblaze/include/asm/pgtable.h
index 885574a73f01..b23f68075879 100644
--- a/trunk/arch/microblaze/include/asm/pgtable.h
+++ b/trunk/arch/microblaze/include/asm/pgtable.h
@@ -411,19 +411,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
- unsigned long flags, old, tmp;
-
- raw_local_irq_save(flags);
-
- __asm__ __volatile__( "lw %0, %2, r0 \n"
- "andn %1, %0, %3 \n"
- "or %1, %1, %4 \n"
- "sw %1, %2, r0 \n"
- : "=&r" (old), "=&r" (tmp)
- : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
- : "cc");
-
- raw_local_irq_restore(flags);
+ unsigned long old, tmp, msr;
+
+ __asm__ __volatile__("\
+ msrclr %2, 0x2\n\
+ nop\n\
+ lw %0, %4, r0\n\
+ andn %1, %0, %5\n\
+ or %1, %1, %6\n\
+ sw %1, %4, r0\n\
+ mts rmsr, %2\n\
+ nop"
+ : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
+ : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
+ : "cc");
return old;
}
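
Editor's note: stripped of the MSR save/clear/restore that provides atomicity against interrupts, the read-modify-write the assembly above performs is simply "old = *p; *p = (old & ~clr) | set". A plain C sketch of that logic (no interrupt protection, so not a drop-in replacement; pte_update_sketch and the constants are illustrative):

#include <stdio.h>

/*
 * Non-atomic sketch of pte_update(): clear the bits in 'clr', set the bits
 * in 'set', and return the previous word. The real routine wraps this in
 * code that clears MSR[IE] (or saves/restores flags) around the update.
 */
static unsigned long pte_update_sketch(unsigned long *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old = *p;

	*p = (old & ~clr) | set;
	return old;
}

int main(void)
{
	unsigned long pte = 0xc01UL;

	/* Clear bit 0, set bit 8; the previous word is returned. */
	unsigned long old = pte_update_sketch(&pte, 0x1UL, 0x100UL);

	printf("old=%#lx new=%#lx\n", old, pte);	/* old=0xc01 new=0xd00 */
	return 0;
}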
diff --git a/trunk/arch/microblaze/kernel/cpu/pvr.c b/trunk/arch/microblaze/kernel/cpu/pvr.c
index 488c1ed24e38..e01afa68273e 100644
--- a/trunk/arch/microblaze/kernel/cpu/pvr.c
+++ b/trunk/arch/microblaze/kernel/cpu/pvr.c
@@ -27,7 +27,7 @@
register unsigned tmp __asm__("r3"); \
tmp = 0x0; /* Prevent warning about unused */ \
__asm__ __volatile__ ( \
- "mfs %0, rpvr" #pvrid ";" \
+ "mfs %0, rpvr" #pvrid ";" \
: "=r" (tmp) : : "memory"); \
val = tmp; \
}
@@ -54,7 +54,7 @@ int cpu_has_pvr(void)
if (!(flags & PVR_MSR_BIT))
return 0;
- get_single_pvr(0, pvr0);
+ get_single_pvr(0x00, pvr0);
pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
if (pvr0 & PVR0_PVR_FULL_MASK)
diff --git a/trunk/arch/microblaze/kernel/head.S b/trunk/arch/microblaze/kernel/head.S
index 778a5ce2e4fc..0db20b5abb54 100644
--- a/trunk/arch/microblaze/kernel/head.S
+++ b/trunk/arch/microblaze/kernel/head.S
@@ -62,14 +62,15 @@ real_start:
andi r1, r1, ~2
mts rmsr, r1
/*
- * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
- * if the msrclr instruction is not enabled. We use this to detect
- * if the opcode is available, by issuing msrclr and then testing the result.
- * r8 == 0 - msr instructions are implemented
- * r8 != 0 - msr instructions are not implemented
+ * Here is the checking mechanism which checks if the MicroBlaze has msr
+ * instructions. We load the msr and compare it with the previous r1 value -
+ * if they are the same, the msr instructions work; if not, the CPU doesn't
+ * have them.
*/
- msrclr r8, 0 /* clear nothing - just read msr for test */
- cmpu r8, r8, r1 /* r1 must contain msr reg content */
+ /* r8 = 0 - msr instructions are implemented, 1 - they are not */
+ rsubi r0, r0, 1 /* set the carry bit */
+ msrclr r0, 0x4 /* try to clear it */
+ /* read the carry bit, r8 will be '0' if msrclr exists */
+ addik r8, r0, 0
/* r7 may point to an FDT, or there may be one linked in.
if it's in r7, we've got to save it away ASAP.
diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c
index 9312fbb37efd..bb1558e4b283 100644
--- a/trunk/arch/microblaze/kernel/setup.c
+++ b/trunk/arch/microblaze/kernel/setup.c
@@ -161,11 +161,11 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
eprintk("!!!Your kernel has setup MSR instruction but "
- "CPU don't have it %x\n", msr);
+ "CPU don't have it %d\n", msr);
#else
if (!msr)
eprintk("!!!Your kernel not setup MSR instruction but "
- "CPU have it %x\n", msr);
+ "CPU have it %d\n", msr);
#endif
for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig
index d88983516e26..f5ecc0566bc2 100644
--- a/trunk/arch/mips/Kconfig
+++ b/trunk/arch/mips/Kconfig
@@ -4,7 +4,6 @@ config MIPS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
@@ -209,7 +208,6 @@ config MACH_JZ4740
select ARCH_REQUIRE_GPIOLIB
select SYS_HAS_EARLY_PRINTK
select HAVE_PWM
- select HAVE_CLK
config LASAT
bool "LASAT Networks platforms"
@@ -335,8 +333,6 @@ config PNX8550_STB810
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
depends on EXPERIMENTAL
- select CEVT_R4K
- select CSRC_R4K
select DMA_NONCOHERENT
select SWAP_IO_SPACE
select NO_EXCEPT_FILL
diff --git a/trunk/arch/mips/alchemy/mtx-1/board_setup.c b/trunk/arch/mips/alchemy/mtx-1/board_setup.c
index 40b84b991191..6398fa95905c 100644
--- a/trunk/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/trunk/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
static void mtx1_reset(char *c)
{
- /* Jump to the reset vector */
- __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
+ /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
+ au_writel(0x00000000, 0xAE00001C);
}
static void mtx1_power_off(void)
diff --git a/trunk/arch/mips/alchemy/mtx-1/platform.c b/trunk/arch/mips/alchemy/mtx-1/platform.c
index 956f946218c5..e30e42add697 100644
--- a/trunk/arch/mips/alchemy/mtx-1/platform.c
+++ b/trunk/arch/mips/alchemy/mtx-1/platform.c
@@ -28,8 +28,6 @@
#include
#include
-#include
-
static struct gpio_keys_button mtx1_gpio_button[] = {
{
.gpio = 207,
@@ -142,17 +140,10 @@ static struct __initdata platform_device * mtx1_devs[] = {
&mtx1_mtd,
};
-static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
- .phy_search_highest_addr = 1,
- .phy1_search_mac0 = 1,
-};
-
static int __init mtx1_register_devices(void)
{
int rc;
- au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
-
rc = gpio_request(mtx1_gpio_button[0].gpio,
mtx1_gpio_button[0].desc);
if (rc < 0) {
diff --git a/trunk/arch/mips/alchemy/xxs1500/board_setup.c b/trunk/arch/mips/alchemy/xxs1500/board_setup.c
index 80c521e5290d..b43c918925d3 100644
--- a/trunk/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/trunk/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
static void xxs1500_reset(char *c)
{
- /* Jump to the reset vector */
- __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
+ /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
+ au_writel(0x00000000, 0xAE00001C);
}
static void xxs1500_power_off(void)
diff --git a/trunk/arch/mips/include/asm/futex.h b/trunk/arch/mips/include/asm/futex.h
index 6ebf1734b411..b9cce90346cf 100644
--- a/trunk/arch/mips/include/asm/futex.h
+++ b/trunk/arch/mips/include/asm/futex.h
@@ -75,7 +75,7 @@
}
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -132,13 +132,11 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0;
- u32 val;
+ int retval;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -147,25 +145,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set push \n"
" .set noat \n"
" .set mips3 \n"
- "1: ll %1, %3 \n"
- " bne %1, %z4, 3f \n"
+ "1: ll %0, %2 \n"
+ " bne %0, %z3, 3f \n"
" .set mips0 \n"
- " move $1, %z5 \n"
+ " move $1, %z4 \n"
" .set mips3 \n"
- "2: sc $1, %2 \n"
+ "2: sc $1, %1 \n"
" beqzl $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
- "4: li %0, %6 \n"
+ "4: li %0, %5 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
+ : "=&r" (retval), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
@@ -174,32 +172,31 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set push \n"
" .set noat \n"
" .set mips3 \n"
- "1: ll %1, %3 \n"
- " bne %1, %z4, 3f \n"
+ "1: ll %0, %2 \n"
+ " bne %0, %z3, 3f \n"
" .set mips0 \n"
- " move $1, %z5 \n"
+ " move $1, %z4 \n"
" .set mips3 \n"
- "2: sc $1, %2 \n"
+ "2: sc $1, %1 \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
- "4: li %0, %6 \n"
+ "4: li %0, %5 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=R" (*uaddr)
+ : "=&r" (retval), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else
return -ENOSYS;
- *uval = val;
- return ret;
+ return retval;
}
#endif
diff --git a/trunk/arch/mips/include/asm/perf_event.h b/trunk/arch/mips/include/asm/perf_event.h
index d0c77496c728..e00007cf8162 100644
--- a/trunk/arch/mips/include/asm/perf_event.h
+++ b/trunk/arch/mips/include/asm/perf_event.h
@@ -11,5 +11,15 @@
#ifndef __MIPS_PERF_EVENT_H__
#define __MIPS_PERF_EVENT_H__
-/* Leave it empty here. The file is required by linux/perf_event.h */
+
+/*
+ * MIPS performance counters do not raise an NMI upon overflow; a regular
+ * interrupt is signaled instead. Hence we can do the pending perf event
+ * work at the tail of the irq handler.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c
index 94ca2b018af7..5a84a1f11231 100644
--- a/trunk/arch/mips/kernel/ftrace.c
+++ b/trunk/arch/mips/kernel/ftrace.c
@@ -17,13 +17,29 @@
#include
#include
-#include
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ */
+
+static inline int in_module(unsigned long ip)
+{
+ return ip & 0x40000000;
+}
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
+#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
#define INSN_NOP 0x00000000 /* nop */
#define INSN_JAL(addr) \
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -53,20 +69,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
#endif
}
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
@@ -82,42 +84,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
-/*
- * The details about the calling site of mcount on MIPS
- *
- * 1. For kernel:
- *
- * move at, ra
- * jal _mcount --> nop
- *
- * 2. For modules:
- *
- * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
- *
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
- * move at, ra
- * move $12, ra_address
- * jalr v1
- * sub sp, sp, 8
- * 1: offset = 5 instructions
- * 2.2 For the Other situations
- *
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
- * move at, ra
- * jalr v1
- * nop | move $12, ra_address | sub sp, sp, 8
- * 1: offset = 4 instructions
- */
-
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
-#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
-
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -125,11 +91,39 @@ int ftrace_make_nop(struct module *mod,
unsigned long ip = rec->ip;
/*
- * If ip is in kernel space, no long call, otherwise, long call is
- * needed.
+ * We have compiled module with -mlong-calls, but compiled the kernel
+ * without it, we need to cope with them respectively.
*/
- new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
-
+ if (in_module(ip)) {
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ */
+ new = INSN_B_1F_5;
+#else
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+ new = INSN_B_1F_4;
+#endif
+ } else {
+ /*
+ * move at, ra
+ * jal _mcount --> nop
+ */
+ new = INSN_NOP;
+ }
return ftrace_modify_code(ip, new);
}
@@ -138,8 +132,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
- new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
- insn_lui_v1_hi16_mcount;
+ /* ip, module: 0xc0000000, kernel: 0x80000000 */
+ new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
return ftrace_modify_code(ip, new);
}
@@ -196,25 +190,29 @@ int ftrace_disable_ftrace_graph_caller(void)
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
-unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
- old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+unsigned long ftrace_get_parent_addr(unsigned long self_addr,
+ unsigned long parent,
+ unsigned long parent_addr,
+ unsigned long fp)
{
- unsigned long sp, ip, tmp;
+ unsigned long sp, ip, ra;
unsigned int code;
int faulted;
/*
- * For module, move the ip from the return address after the
- * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
- * kernel, move after the instruction "move ra, at"(offset is 16)
+ * For module, move the ip from calling site of mcount to the
+ * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
+ * kernel, move to the instruction "move ra, at"(offset is 12)
*/
- ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ ip = self_addr - (in_module(self_addr) ? 20 : 12);
/*
* search the text until finding the non-store instruction or "s{d,w}
* ra, offset(sp)" instruction
*/
do {
+ ip -= 4;
+
/* get the code at "ip": code = *(unsigned int *)ip; */
safe_load_code(code, ip, faulted);
@@ -226,20 +224,18 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
* store the ra on the stack
*/
if ((code & S_R_SP) != S_R_SP)
- return parent_ra_addr;
+ return parent_addr;
- /* Move to the next instruction */
- ip -= 4;
- } while ((code & S_RA_SP) != S_RA_SP);
+ } while (((code & S_RA_SP) != S_RA_SP));
sp = fp + (code & OFFSET_MASK);
- /* tmp = *(unsigned long *)sp; */
- safe_load_stack(tmp, sp, faulted);
+ /* ra = *(unsigned long *)sp; */
+ safe_load_stack(ra, sp, faulted);
if (unlikely(faulted))
return 0;
- if (tmp == old_parent_ra)
+ if (ra == parent)
return sp;
return 0;
}
@@ -250,21 +246,21 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
-void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long fp)
{
- unsigned long old_parent_ra;
+ unsigned long old;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
- int faulted, insns;
+ int faulted;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
- * "parent_ra_addr" is the stack address saved the return address of
- * the caller of _mcount.
+ * "parent" is the stack address saved the return address of the caller
+ * of _mcount.
*
* if the gcc < 4.5, a leaf function does not save the return address
* in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -279,44 +275,37 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
* do it in ftrace_graph_caller of mcount.S.
*/
- /* old_parent_ra = *parent_ra_addr; */
- safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
+ /* old = *parent; */
+ safe_load_stack(old, parent, faulted);
if (unlikely(faulted))
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
- parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
- old_parent_ra, (unsigned long)parent_ra_addr, fp);
+ parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
+ (unsigned long)parent, fp);
/*
* If fails when getting the stack address of the non-leaf function's
* ra, stop function graph tracer and return
*/
- if (parent_ra_addr == 0)
+ if (parent == 0)
goto out;
#endif
- /* *parent_ra_addr = return_hooker; */
- safe_store_stack(return_hooker, parent_ra_addr, faulted);
+ /* *parent = return_hooker; */
+ safe_store_stack(return_hooker, parent, faulted);
if (unlikely(faulted))
goto out;
- if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
- == -EBUSY) {
- *parent_ra_addr = old_parent_ra;
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
+ -EBUSY) {
+ *parent = old;
return;
}
- /*
- * Get the recorded ip of the current mcount calling site in the
- * __mcount_loc section, which will be used to filter the function
- * entries configured through the tracing/set_graph_function interface.
- */
-
- insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
- trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+ trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
- *parent_ra_addr = old_parent_ra;
+ *parent = old;
}
return;
out:
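
Editor's note: the in_module() test added above keys off bit 30 of the address: module space starts at 0xc0000000 and kernel space at 0x80000000, so "ip & 0x40000000" is non-zero only for module addresses, and ftrace_make_nop()/ftrace_make_call() pick the long-call or short-call replacement instruction accordingly. A small standalone illustration of that selection (it uses only the 4-instruction branch variant; the INSN_B_1F_5 config case is omitted, and the sketch names are made up):

#include <stdio.h>

#define INSN_NOP	0x00000000	/* nop				*/
#define INSN_B_1F_4	0x10000004	/* b 1f; skip 4 instructions	*/

/* Module text lives at 0xc0000000, kernel text at 0x80000000 (see FIXME above). */
static int in_module_sketch(unsigned long ip)
{
	return (ip & 0x40000000) != 0;
}

/* Pick the instruction that replaces the mcount call site when tracing is off. */
static unsigned int make_nop_insn(unsigned long ip)
{
	return in_module_sketch(ip) ? INSN_B_1F_4 : INSN_NOP;
}

int main(void)
{
	printf("kernel ip 0x80123450 -> 0x%08x\n", make_nop_insn(0x80123450UL));
	printf("module ip 0xc0123450 -> 0x%08x\n", make_nop_insn(0xc0123450UL));
	return 0;
}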
diff --git a/trunk/arch/mips/kernel/perf_event.c b/trunk/arch/mips/kernel/perf_event.c
index a8244854d3dc..2b7f3f703b83 100644
--- a/trunk/arch/mips/kernel/perf_event.c
+++ b/trunk/arch/mips/kernel/perf_event.c
@@ -161,6 +161,41 @@ mipspmu_event_set_period(struct perf_event *event,
return ret;
}
+static int mipspmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* To look for a free counter for this event. */
+ idx = mipspmu->alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipspmu->disable_event(idx);
+ cpuc->events[idx] = event;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ mipspmu->enable_event(hwc, idx);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ return err;
+}
+
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
@@ -169,7 +204,7 @@ static void mipspmu_event_update(struct perf_event *event,
unsigned long flags;
int shift = 64 - TOTAL_BITS;
s64 prev_raw_count, new_raw_count;
- u64 delta;
+ s64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -196,90 +231,32 @@ static void mipspmu_event_update(struct perf_event *event,
return;
}
-static void mipspmu_start(struct perf_event *event, int flags)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (!mipspmu)
- return;
-
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
- hwc->state = 0;
-
- /* Set the period for the event. */
- mipspmu_event_set_period(event, hwc, hwc->idx);
-
- /* Enable the event. */
- mipspmu->enable_event(hwc, hwc->idx);
-}
-
-static void mipspmu_stop(struct perf_event *event, int flags)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (!mipspmu)
- return;
-
- if (!(hwc->state & PERF_HES_STOPPED)) {
- /* We are working on a local event. */
- mipspmu->disable_event(hwc->idx);
- barrier();
- mipspmu_event_update(event, hwc, hwc->idx);
- hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
- }
-}
-
-static int mipspmu_add(struct perf_event *event, int flags)
+static void mipspmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- int idx;
- int err = 0;
+ int idx = hwc->idx;
- perf_pmu_disable(event->pmu);
- /* To look for a free counter for this event. */
- idx = mipspmu->alloc_counter(cpuc, hwc);
- if (idx < 0) {
- err = idx;
- goto out;
- }
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
- /*
- * If there is an event in the counter we are going to use then
- * make sure it is disabled.
- */
- event->hw.idx = idx;
+ /* We are working on a local event. */
mipspmu->disable_event(idx);
- cpuc->events[idx] = event;
- hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- if (flags & PERF_EF_START)
- mipspmu_start(event, PERF_EF_RELOAD);
+ barrier();
- /* Propagate our changes to the userspace mapping. */
- perf_event_update_userpage(event);
+ mipspmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
-out:
- perf_pmu_enable(event->pmu);
- return err;
+ perf_event_update_userpage(event);
}
-static void mipspmu_del(struct perf_event *event, int flags)
+static void mipspmu_unthrottle(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
-
- mipspmu_stop(event, PERF_EF_UPDATE);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
-
- perf_event_update_userpage(event);
+ mipspmu->enable_event(hwc, hwc->idx);
}
static void mipspmu_read(struct perf_event *event)
@@ -293,17 +270,12 @@ static void mipspmu_read(struct perf_event *event)
mipspmu_event_update(event, hwc, hwc->idx);
}
-static void mipspmu_enable(struct pmu *pmu)
-{
- if (mipspmu)
- mipspmu->start();
-}
-
-static void mipspmu_disable(struct pmu *pmu)
-{
- if (mipspmu)
- mipspmu->stop();
-}
+static struct pmu pmu = {
+ .enable = mipspmu_enable,
+ .disable = mipspmu_disable,
+ .unthrottle = mipspmu_unthrottle,
+ .read = mipspmu_read,
+};
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -346,82 +318,6 @@ static void mipspmu_free_irq(void)
perf_irq = save_perf_irq;
}
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
- if (atomic_dec_and_mutex_lock(&active_events,
- &pmu_reserve_mutex)) {
- /*
- * We must not call the destroy function with interrupts
- * disabled.
- */
- on_each_cpu(reset_counters,
- (void *)(long)mipspmu->num_counters, 1);
- mipspmu_free_irq();
- mutex_unlock(&pmu_reserve_mutex);
- }
-}
-
-static int mipspmu_event_init(struct perf_event *event)
-{
- int err = 0;
-
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
- return -ENOENT;
- }
-
- if (!mipspmu || event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
- return -ENODEV;
-
- if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
- atomic_dec(&active_events);
- return -ENOSPC;
- }
-
- mutex_lock(&pmu_reserve_mutex);
- if (atomic_read(&active_events) == 0)
- err = mipspmu_get_irq();
-
- if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmu_reserve_mutex);
- }
-
- if (err)
- return err;
-
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err;
-}
-
-static struct pmu pmu = {
- .pmu_enable = mipspmu_enable,
- .pmu_disable = mipspmu_disable,
- .event_init = mipspmu_event_init,
- .add = mipspmu_add,
- .del = mipspmu_del,
- .start = mipspmu_start,
- .stop = mipspmu_stop,
- .read = mipspmu_read,
-};
-
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
@@ -486,9 +382,8 @@ static int validate_event(struct cpu_hw_events *cpuc,
{
struct hw_perf_event fake_hwc = event->hw;
- /* Allow mixed event group. So return 1 to pass validation. */
- if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
- return 1;
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
@@ -514,6 +409,73 @@ static int validate_group(struct perf_event *event)
return 0;
}
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu->num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return ERR_PTR(-ENODEV);
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+ atomic_dec(&active_events);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return ERR_PTR(err);
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err ? ERR_PTR(err) : &pmu;
+}
+
+void hw_perf_enable(void)
+{
+ if (mipspmu)
+ mipspmu->start();
+}
+
+void hw_perf_disable(void)
+{
+ if (mipspmu)
+ mipspmu->stop();
+}
+
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
@@ -534,13 +496,21 @@ handle_associated_event(struct cpu_hw_events *cpuc,
#include "perf_event_mipsxx.c"
/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we add here.
*/
-void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
}
@@ -553,21 +523,23 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
- perf_callchain_store(entry, addr);
+ callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
@@ -577,12 +549,53 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
return;
}
do {
- perf_callchain_store(entry, pc);
+ callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
save_raw_perf_callchain(entry, sp);
#endif
}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
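
Editor's note: callchain_store() added above is a bounds-checked append into a fixed-size array of instruction pointers; perf_callchain() then resets the per-CPU entry and walks kernel (and, when possible, user) frames into it. A compact userspace sketch of that bounded-append pattern, with a simplified stand-in for struct perf_callchain_entry (all names and the depth value here are illustrative):

#include <stdio.h>

#define MAX_STACK_DEPTH 4	/* stand-in for PERF_MAX_STACK_DEPTH */

/* Simplified stand-in for struct perf_callchain_entry. */
struct callchain_entry {
	unsigned int nr;
	unsigned long ip[MAX_STACK_DEPTH];
};

/* Append one address unless the entry is already full. */
static void callchain_store_sketch(struct callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

int main(void)
{
	struct callchain_entry entry = { .nr = 0 };
	unsigned long frames[] = { 0x80001000, 0x80002000, 0x80003000,
				   0x80004000, 0x80005000 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		callchain_store_sketch(&entry, frames[i]);

	/* The fifth frame is silently dropped once the entry is full. */
	printf("stored %u of 5 frames\n", entry.nr);
	return 0;
}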
diff --git a/trunk/arch/mips/kernel/perf_event_mipsxx.c b/trunk/arch/mips/kernel/perf_event_mipsxx.c
index d9a7db78ed62..183e0d226669 100644
--- a/trunk/arch/mips/kernel/perf_event_mipsxx.c
+++ b/trunk/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
* interrupt, not NMI.
*/
if (handled == IRQ_HANDLED)
- irq_work_run();
+ perf_event_do_pending();
#ifdef CONFIG_MIPS_MT_SMP
read_unlock(&pmuint_rwlock);
@@ -1045,8 +1045,6 @@ init_hw_perf_events(void)
"CPU, irq %d%s\n", mipspmu->name, counters, irq,
irq < 0 ? " (share with timer interrupt)" : "");
- perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
return 0;
}
early_initcall(init_hw_perf_events);
diff --git a/trunk/arch/mips/kernel/signal.c b/trunk/arch/mips/kernel/signal.c
index dbbe0ce48d89..5922342bca39 100644
--- a/trunk/arch/mips/kernel/signal.c
+++ b/trunk/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
- int err, tmp __maybe_unused;
+ int err, tmp;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
diff --git a/trunk/arch/mips/kernel/signal32.c b/trunk/arch/mips/kernel/signal32.c
index aae986613795..a0ed0e052b2e 100644
--- a/trunk/arch/mips/kernel/signal32.c
+++ b/trunk/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
- int err, tmp __maybe_unused;
+ int err, tmp;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c
index 32a256101082..383aeb95cb49 100644
--- a/trunk/arch/mips/kernel/smp.c
+++ b/trunk/arch/mips/kernel/smp.c
@@ -193,22 +193,6 @@ void __devinit smp_prepare_boot_cpu(void)
*/
static struct task_struct *cpu_idle_thread[NR_CPUS];
-struct create_idle {
- struct work_struct work;
- struct task_struct *idle;
- struct completion done;
- int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
-{
- struct create_idle *c_idle =
- container_of(work, struct create_idle, work);
-
- c_idle->idle = fork_idle(c_idle->cpu);
- complete(&c_idle->done);
-}
-
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
@@ -219,19 +203,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
* Linux can schedule processes on this slave.
*/
if (!cpu_idle_thread[cpu]) {
- /*
- * Schedule work item to avoid forking user task
- * Ported from arch/x86/kernel/smpboot.c
- */
- struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
- };
-
- INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
- schedule_work(&c_idle.work);
- wait_for_completion(&c_idle.done);
- idle = cpu_idle_thread[cpu] = c_idle.idle;
+ idle = fork_idle(cpu);
+ cpu_idle_thread[cpu] = idle;
if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu);
diff --git a/trunk/arch/mips/kernel/syscall.c b/trunk/arch/mips/kernel/syscall.c
index 58beabf50b3c..1dc6edff45e0 100644
--- a/trunk/arch/mips/kernel/syscall.c
+++ b/trunk/arch/mips/kernel/syscall.c
@@ -383,11 +383,12 @@ save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
- long cmd, arg1, arg2;
+ long cmd, arg1, arg2, arg3;
cmd = regs.regs[4];
arg1 = regs.regs[5];
arg2 = regs.regs[6];
+ arg3 = regs.regs[7];
switch (cmd) {
case MIPS_ATOMIC_SET:
@@ -404,7 +405,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
if (arg1 & 2)
set_thread_flag(TIF_LOGADE);
else
- clear_thread_flag(TIF_LOGADE);
+ clear_thread_flag(TIF_FIXADE);
return 0;
diff --git a/trunk/arch/mips/kernel/vpe.c b/trunk/arch/mips/kernel/vpe.c
index ab52b7cf3b6b..6a1fdfef8fde 100644
--- a/trunk/arch/mips/kernel/vpe.c
+++ b/trunk/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
spinlock_t tc_list_lock;
struct list_head tc_list; /* Thread contexts */
} vpecontrol = {
- .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
+ .vpe_list_lock = SPIN_LOCK_UNLOCKED,
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
- .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
+ .tc_list_lock = SPIN_LOCK_UNLOCKED,
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
diff --git a/trunk/arch/mips/loongson/Kconfig b/trunk/arch/mips/loongson/Kconfig
index aca93eed8779..6e1b77fec7ea 100644
--- a/trunk/arch/mips/loongson/Kconfig
+++ b/trunk/arch/mips/loongson/Kconfig
@@ -1,7 +1,6 @@
-if MACH_LOONGSON
-
choice
prompt "Machine Type"
+ depends on MACH_LOONGSON
config LEMOTE_FULOONG2E
bool "Lemote Fuloong(2e) mini-PC"
@@ -88,5 +87,3 @@ config LOONGSON_UART_BASE
config LOONGSON_MC146818
bool
default n
-
-endif # MACH_LOONGSON
diff --git a/trunk/arch/mips/loongson/common/cmdline.c b/trunk/arch/mips/loongson/common/cmdline.c
index 353e1d2e41a5..1a06defc4f7f 100644
--- a/trunk/arch/mips/loongson/common/cmdline.c
+++ b/trunk/arch/mips/loongson/common/cmdline.c
@@ -44,5 +44,10 @@ void __init prom_init_cmdline(void)
strcat(arcs_cmdline, " ");
}
+ if ((strstr(arcs_cmdline, "console=")) == NULL)
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+ if ((strstr(arcs_cmdline, "root=")) == NULL)
+ strcat(arcs_cmdline, " root=/dev/hda1");
+
prom_init_machtype();
}
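
Editor's note: the hunk above restores default boot parameters when the firmware command line does not provide them - if no "console=" or "root=" token is present, a default is appended. The same check-and-append pattern in a standalone sketch (the buffer size, helper name, and machtype string are illustrative; the kernel uses COMMAND_LINE_SIZE and arcs_cmdline):

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 256

/* Append a default option only when the corresponding key is missing. */
static void append_default(char *cmdline, const char *key, const char *def)
{
	if (strstr(cmdline, key) == NULL) {
		strncat(cmdline, " ", CMDLINE_SIZE - strlen(cmdline) - 1);
		strncat(cmdline, def, CMDLINE_SIZE - strlen(cmdline) - 1);
	}
}

int main(void)
{
	char cmdline[CMDLINE_SIZE] = "machtype=lemote-fuloong-2e";

	append_default(cmdline, "console=", "console=ttyS0,115200");
	append_default(cmdline, "root=", "root=/dev/hda1");
	append_default(cmdline, "root=", "root=/dev/sda1");	/* already set: no-op */

	printf("%s\n", cmdline);
	return 0;
}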
diff --git a/trunk/arch/mips/loongson/common/machtype.c b/trunk/arch/mips/loongson/common/machtype.c
index 2efd5d9dee27..81fbe6b73f91 100644
--- a/trunk/arch/mips/loongson/common/machtype.c
+++ b/trunk/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
void __init prom_init_machtype(void)
{
- char *p, str[MACHTYPE_LEN + 1];
+ char *p, str[MACHTYPE_LEN];
int machtype = MACH_LEMOTE_FL2E;
mips_machtype = LOONGSON_MACHTYPE;
@@ -53,7 +53,6 @@ void __init prom_init_machtype(void)
}
p += strlen("machtype=");
strncpy(str, p, MACHTYPE_LEN);
- str[MACHTYPE_LEN] = '\0';
p = strstr(str, " ");
if (p)
*p = '\0';
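
Editor's note: the hunk above drops the spare byte and explicit terminator around the strncpy() call. Worth noting when reading it: strncpy() does not NUL-terminate the destination when the source is at least as long as the size argument, which is what the removed "str[MACHTYPE_LEN] = '\0';" guarded against. A tiny demonstration of the two patterns (the MACHTYPE_LEN value of 8 is illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 8

int main(void)
{
	const char *p = "verylongmachtype more";	/* longer than MACHTYPE_LEN */
	char unsafe[MACHTYPE_LEN];
	char safe[MACHTYPE_LEN + 1];

	/* strncpy() fills all MACHTYPE_LEN bytes, leaving no terminator. */
	strncpy(unsafe, p, MACHTYPE_LEN);

	/* The removed pattern: one spare byte, terminated explicitly. */
	strncpy(safe, p, MACHTYPE_LEN);
	safe[MACHTYPE_LEN] = '\0';

	printf("terminated copy: \"%s\"\n", safe);	/* "verylong" */
	/* Printing 'unsafe' with %s here would read past the buffer. */
	return 0;
}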
diff --git a/trunk/arch/mips/math-emu/ieee754int.h b/trunk/arch/mips/math-emu/ieee754int.h
index 2a7d43f4f161..2701d9500959 100644
--- a/trunk/arch/mips/math-emu/ieee754int.h
+++ b/trunk/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
#define COMPXSP \
- unsigned xm; int xe; int xs __maybe_unused; int xc
+ unsigned xm; int xe; int xs; int xc
#define COMPYSP \
unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
#define COMPXDP \
-u64 xm; int xe; int xs __maybe_unused; int xc
+u64 xm; int xe; int xs; int xc
#define COMPYDP \
u64 ym; int ye; int ys; int yc
diff --git a/trunk/arch/mips/mm/init.c b/trunk/arch/mips/mm/init.c
index 279599e9a779..2efcbd24c82f 100644
--- a/trunk/arch/mips/mm/init.c
+++ b/trunk/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long lastpfn __maybe_unused;
+ unsigned long lastpfn;
pagetable_init();
diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c
index 04f9e17db9d0..083d3412d0bc 100644
--- a/trunk/arch/mips/mm/tlbex.c
+++ b/trunk/arch/mips/mm/tlbex.c
@@ -109,8 +109,6 @@ static bool scratchpad_available(void)
static int scratchpad_offset(int i)
{
BUG();
- /* Really unreachable, but evidently some GCC want this. */
- return 0;
}
#endif
/*
diff --git a/trunk/arch/mips/pci/ops-pmcmsp.c b/trunk/arch/mips/pci/ops-pmcmsp.c
index 68798f869c0f..b7c03d80c88c 100644
--- a/trunk/arch/mips/pci/ops-pmcmsp.c
+++ b/trunk/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
* RETURNS: PCIBIOS_SUCCESSFUL - success
*
****************************************************************************/
-static irqreturn_t bpci_interrupt(int irq, void *dev_id)
+static int bpci_interrupt(int irq, void *dev_id)
{
struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static irqreturn_t bpci_interrupt(int irq, void *dev_id)
/* write to clear all asserted interrupts */
preg->if_status = stat;
- return IRQ_HANDLED;
+ return PCIBIOS_SUCCESSFUL;
}
/*****************************************************************************
diff --git a/trunk/arch/mips/pmc-sierra/Kconfig b/trunk/arch/mips/pmc-sierra/Kconfig
index 8d798497c614..c139988bb85d 100644
--- a/trunk/arch/mips/pmc-sierra/Kconfig
+++ b/trunk/arch/mips/pmc-sierra/Kconfig
@@ -4,11 +4,15 @@ choice
config PMC_MSP4200_EVAL
bool "PMC-Sierra MSP4200 Eval Board"
+ select CEVT_R4K
+ select CSRC_R4K
select IRQ_MSP_SLP
select HW_HAS_PCI
config PMC_MSP4200_GW
bool "PMC-Sierra MSP4200 VoIP Gateway"
+ select CEVT_R4K
+ select CSRC_R4K
select IRQ_MSP_SLP
select HW_HAS_PCI
diff --git a/trunk/arch/mips/pmc-sierra/msp71xx/msp_time.c b/trunk/arch/mips/pmc-sierra/msp71xx/msp_time.c
index 01df84ce31e2..cca64e15f57f 100644
--- a/trunk/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/trunk/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
mips_hpt_frequency = cpu_rate/2;
}
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int __init get_c0_compare_int(void)
{
return MSP_INT_VPE0_TIMER;
}
diff --git a/trunk/arch/mn10300/include/asm/atomic.h b/trunk/arch/mn10300/include/asm/atomic.h
index 9d773a639513..92d2f9298e38 100644
--- a/trunk/arch/mn10300/include/asm/atomic.h
+++ b/trunk/arch/mn10300/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
-#define atomic_read(v) (ACCESS_ONCE((v)->counter))
+#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
diff --git a/trunk/arch/mn10300/include/asm/uaccess.h b/trunk/arch/mn10300/include/asm/uaccess.h
index 3d6e60dad9d9..679dee0bbd08 100644
--- a/trunk/arch/mn10300/include/asm/uaccess.h
+++ b/trunk/arch/mn10300/include/asm/uaccess.h
@@ -160,10 +160,9 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_check(x, ptr, size) \
({ \
- const __typeof__(ptr) __guc_ptr = (ptr); \
int _e; \
- if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
- _e = __get_user_nocheck((x), __guc_ptr, (size)); \
+ if (likely(__access_ok((unsigned long) (ptr), (size)))) \
+ _e = __get_user_nocheck((x), (ptr), (size)); \
else { \
_e = -EFAULT; \
(x) = (__typeof__(x))0; \
diff --git a/trunk/arch/mn10300/kernel/time.c b/trunk/arch/mn10300/kernel/time.c
index 5b955000626d..75da468090b9 100644
--- a/trunk/arch/mn10300/kernel/time.c
+++ b/trunk/arch/mn10300/kernel/time.c
@@ -104,6 +104,8 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
unsigned tsc, elapse;
irqreturn_t ret;
+ write_seqlock(&xtime_lock);
+
while (tsc = get_cycles(),
elapse = tsc - mn10300_last_tsc, /* time elapsed since last
* tick */
@@ -112,9 +114,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
mn10300_last_tsc += MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
- xtime_update(1);
+ do_timer(1);
}
+ write_sequnlock(&xtime_lock);
+
ret = local_timer_interrupt();
#ifdef CONFIG_SMP
send_IPI_allbutself(LOCAL_TIMER_IPI);
diff --git a/trunk/arch/mn10300/mm/cache-inv-icache.c b/trunk/arch/mn10300/mm/cache-inv-icache.c
index a6b63dde603d..a8933a60b2d4 100644
--- a/trunk/arch/mn10300/mm/cache-inv-icache.c
+++ b/trunk/arch/mn10300/mm/cache-inv-icache.c
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
/* invalidate the icache coverage on that region */
mn10300_local_icache_inv_range2(addr + off, size);
- smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
}
/**
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
* directly */
start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
mn10300_icache_inv_range(start_page, end);
- smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
if (start_page == start)
goto done;
end = start_page;
diff --git a/trunk/arch/parisc/hpux/sys_hpux.c b/trunk/arch/parisc/hpux/sys_hpux.c
index 6ab9580b0b00..30394081d9b6 100644
--- a/trunk/arch/parisc/hpux/sys_hpux.c
+++ b/trunk/arch/parisc/hpux/sys_hpux.c
@@ -185,21 +185,26 @@ struct hpux_statfs {
int16_t f_pad;
};
-static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
+static int do_statfs_hpux(struct path *path, struct hpux_statfs *buf)
{
- struct hpux_statfs buf;
- memset(&buf, 0, sizeof(buf));
- buf.f_type = st->f_type;
- buf.f_bsize = st->f_bsize;
- buf.f_blocks = st->f_blocks;
- buf.f_bfree = st->f_bfree;
- buf.f_bavail = st->f_bavail;
- buf.f_files = st->f_files;
- buf.f_ffree = st->f_ffree;
- buf.f_fsid[0] = st->f_fsid.val[0];
- buf.f_fsid[1] = st->f_fsid.val[1];
- if (copy_to_user(p, &buf, sizeof(buf)))
- return -EFAULT;
+ struct kstatfs st;
+ int retval;
+
+ retval = vfs_statfs(path, &st);
+ if (retval)
+ return retval;
+
+ memset(buf, 0, sizeof(*buf));
+ buf->f_type = st.f_type;
+ buf->f_bsize = st.f_bsize;
+ buf->f_blocks = st.f_blocks;
+ buf->f_bfree = st.f_bfree;
+ buf->f_bavail = st.f_bavail;
+ buf->f_files = st.f_files;
+ buf->f_ffree = st.f_ffree;
+ buf->f_fsid[0] = st.f_fsid.val[0];
+ buf->f_fsid[1] = st.f_fsid.val[1];
+
return 0;
}
@@ -207,19 +212,35 @@ static int do_statfs_hpux(struct kstatfs *st, struct hpux_statfs __user *p)
asmlinkage long hpux_statfs(const char __user *pathname,
struct hpux_statfs __user *buf)
{
- struct kstatfs st;
- int error = user_statfs(pathname, &st);
- if (!error)
- error = do_statfs_hpux(&st, buf);
+ struct path path;
+ int error;
+
+ error = user_path(pathname, &path);
+ if (!error) {
+ struct hpux_statfs tmp;
+ error = do_statfs_hpux(&path, &tmp);
+ if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+ error = -EFAULT;
+ path_put(&path);
+ }
return error;
}
asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
{
- struct kstatfs st;
- int error = fd_statfs(fd, &st);
- if (!error)
- error = do_statfs_hpux(&st, buf);
+ struct file *file;
+ struct hpux_statfs tmp;
+ int error;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ error = do_statfs_hpux(&file->f_path, &tmp);
+ if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
+ error = -EFAULT;
+ fput(file);
+ out:
return error;
}
diff --git a/trunk/arch/parisc/include/asm/futex.h b/trunk/arch/parisc/include/asm/futex.h
index 67a33cc27ef2..0c705c3a55ef 100644
--- a/trunk/arch/parisc/include/asm/futex.h
+++ b/trunk/arch/parisc/include/asm/futex.h
@@ -8,7 +8,7 @@
#include
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
/* Non-atomic version */
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- u32 val;
+ int err = 0;
+ int uval;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
return -EFAULT;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- if (get_user(val, uaddr))
- return -EFAULT;
- if (val == oldval && put_user(newval, uaddr))
- return -EFAULT;
- *uval = val;
- return 0;
+ err = get_user(uval, uaddr);
+ if (err) return -EFAULT;
+ if (uval == oldval)
+ err = put_user(newval, uaddr);
+ if (err) return -EFAULT;
+ return uval;
}
#endif /*__KERNEL__*/
diff --git a/trunk/arch/parisc/kernel/time.c b/trunk/arch/parisc/kernel/time.c
index 45b7389d77aa..05511ccb61d2 100644
--- a/trunk/arch/parisc/kernel/time.c
+++ b/trunk/arch/parisc/kernel/time.c
@@ -162,8 +162,11 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
- if (cpu == 0)
- xtime_update(ticks_elapsed);
+ if (cpu == 0) {
+ write_seqlock(&xtime_lock);
+ do_timer(ticks_elapsed);
+ write_sequnlock(&xtime_lock);
+ }
return IRQ_HANDLED;
}
diff --git a/trunk/arch/powerpc/include/asm/futex.h b/trunk/arch/powerpc/include/asm/futex.h
index c94e4a3fe2ef..7c589ef81fb0 100644
--- a/trunk/arch/powerpc/include/asm/futex.h
+++ b/trunk/arch/powerpc/include/asm/futex.h
@@ -30,7 +30,7 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -82,38 +82,35 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0;
- u32 prev;
+ int prev;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
- cmpw 0,%1,%4\n\
+"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\
+ cmpw 0,%0,%3\n\
bne- 3f\n"
- PPC405_ERR77(0,%3)
-"2: stwcx. %5,0,%3\n\
+ PPC405_ERR77(0,%2)
+"2: stwcx. %4,0,%2\n\
bne- 1b\n"
PPC_ACQUIRE_BARRIER
"3: .section .fixup,\"ax\"\n\
-4: li %0,%6\n\
+4: li %0,%5\n\
b 3b\n\
.previous\n\
.section __ex_table,\"a\"\n\
.align 3\n\
" PPC_LONG "1b,4b,2b,4b\n\
.previous" \
- : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+ : "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
- *uval = prev;
- return ret;
+ return prev;
}
#endif /* __KERNEL__ */
diff --git a/trunk/arch/powerpc/include/asm/lppaca.h b/trunk/arch/powerpc/include/asm/lppaca.h
index 26b8c807f8f1..380d48bacd16 100644
--- a/trunk/arch/powerpc/include/asm/lppaca.h
+++ b/trunk/arch/powerpc/include/asm/lppaca.h
@@ -33,25 +33,9 @@
//
//----------------------------------------------------------------------------
#include
-#include
#include
#include
-/*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS NR_CPUS
-#else
-#define NR_LPPACAS 64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS 1
-#endif
-
-
/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
* alignment is sufficient to prevent this */
struct lppaca {
diff --git a/trunk/arch/powerpc/include/asm/machdep.h b/trunk/arch/powerpc/include/asm/machdep.h
index fe56a23e1ff0..991d5998d6be 100644
--- a/trunk/arch/powerpc/include/asm/machdep.h
+++ b/trunk/arch/powerpc/include/asm/machdep.h
@@ -240,12 +240,6 @@ struct machdep_calls {
* claims to support kexec.
*/
int (*machine_kexec_prepare)(struct kimage *image);
-
- /* Called to perform the _real_ kexec.
- * Do NOT allocate memory or fail here. We are past the point of
- * no return.
- */
- void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND
diff --git a/trunk/arch/powerpc/include/asm/rwsem.h b/trunk/arch/powerpc/include/asm/rwsem.h
index bb1e2cdeb9bf..8447d89fbe72 100644
--- a/trunk/arch/powerpc/include/asm/rwsem.h
+++ b/trunk/arch/powerpc/include/asm/rwsem.h
@@ -13,6 +13,11 @@
* by Paul Mackerras .
*/
+#include
+#include
+#include
+#include
+
/*
* the semaphore definition
*/
@@ -28,6 +33,47 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+struct rw_semaphore {
+ long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ \
+ RWSEM_UNLOCKED_VALUE, \
+ __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) \
+}
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+ } while (0)
+
/*
* lock for reading
*/
@@ -128,5 +174,10 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return sem->count != 0;
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_RWSEM_H */
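
The per-architecture rwsem.h hunks in this diff — the powerpc one above and the s390, sh, sparc and x86 ones further down — all open-code the same counter scheme: each active holder contributes RWSEM_ACTIVE_BIAS, a write acquisition additionally folds in RWSEM_WAITING_BIAS (together RWSEM_ACTIVE_WRITE_BIAS), and rwsem_is_locked() is simply count != 0. A standalone sketch of that bookkeeping, using the 64-bit bias values shown in the sparc hunk (illustration only: no waiter list, no atomics, not the kernel's lock; a 64-bit long is assumed):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_ACTIVE_MASK       0xffffffffL
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        /* two readers acquire: each adds one active count */
        count += RWSEM_ACTIVE_READ_BIAS;
        count += RWSEM_ACTIVE_READ_BIAS;
        printf("readers held: count=%ld locked=%d\n", count, count != 0);

        /* both readers release */
        count -= 2 * RWSEM_ACTIVE_READ_BIAS;
        printf("released    : count=%ld locked=%d\n", count, count != 0);

        /* a writer acquires: waiting bias plus one active count */
        count += RWSEM_ACTIVE_WRITE_BIAS;
        printf("writer held : count=%ld active=%ld\n",
               count, count & RWSEM_ACTIVE_MASK);

        /* writer releases */
        count -= RWSEM_ACTIVE_WRITE_BIAS;
        printf("released    : count=%ld locked=%d\n", count, count != 0);

        return 0;
}

The low RWSEM_ACTIVE_MASK bits count active holders while the negative upper part flags a writer or contention, which is what the xadd-based fast paths in the x86 hunk rely on.
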
diff --git a/trunk/arch/powerpc/kernel/machine_kexec.c b/trunk/arch/powerpc/kernel/machine_kexec.c
index a5f8672eeff3..49a170af8145 100644
--- a/trunk/arch/powerpc/kernel/machine_kexec.c
+++ b/trunk/arch/powerpc/kernel/machine_kexec.c
@@ -87,10 +87,7 @@ void machine_kexec(struct kimage *image)
save_ftrace_enabled = __ftrace_enabled_save();
- if (ppc_md.machine_kexec)
- ppc_md.machine_kexec(image);
- else
- default_machine_kexec(image);
+ default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled);
diff --git a/trunk/arch/powerpc/kernel/paca.c b/trunk/arch/powerpc/kernel/paca.c
index f4adf89d7614..ebf9846f3c3b 100644
--- a/trunk/arch/powerpc/kernel/paca.c
+++ b/trunk/arch/powerpc/kernel/paca.c
@@ -26,6 +26,20 @@ extern unsigned long __toc_start;
#ifdef CONFIG_PPC_BOOK3S
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS NR_CPUS
+#else
+#define NR_LPPACAS 64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS 1
+#endif
+
/*
* The structure which the hypervisor knows about - this structure
* should not cross a page boundary. The vpa_init/register_vpa call
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 8303a6c65ef7..7a1d5cb76932 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -353,7 +353,6 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
prime_debug_regs(new_thread);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
if (thread->dabr) {
@@ -361,7 +360,6 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
set_dabr(0);
}
}
-#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
int set_dabr(unsigned long dabr)
@@ -672,11 +670,11 @@ void flush_thread(void)
{
discard_lazy_cpu_state();
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#ifdef CONFIG_HAVE_HW_BREAKPOINTS
flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINT */
+#else /* CONFIG_HAVE_HW_BREAKPOINTS */
set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
}
void
diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c
index 0dc95c0aa3be..fd4812329570 100644
--- a/trunk/arch/powerpc/mm/numa.c
+++ b/trunk/arch/powerpc/mm/numa.c
@@ -1516,8 +1516,7 @@ int start_topology_update(void)
{
int rc = 0;
- /* Disabled until races with load balancing are fixed */
- if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
+ if (firmware_has_feature(FW_FEATURE_VPHN) &&
get_lppaca()->shared_proc) {
vphn_enabled = 1;
setup_cpu_associativity_change_counters();
diff --git a/trunk/arch/powerpc/mm/tlb_hash64.c b/trunk/arch/powerpc/mm/tlb_hash64.c
index c14d09f614f3..1ec06576f619 100644
--- a/trunk/arch/powerpc/mm/tlb_hash64.c
+++ b/trunk/arch/powerpc/mm/tlb_hash64.c
@@ -38,11 +38,13 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* needs to be flushed. This function will either perform the flush
* immediately or will batch it up if the current CPU has an active
* batch on it.
+ *
+ * Must be called from within some kind of spinlock/non-preempt region...
*/
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
- struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+ struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long vsid, vaddr;
unsigned int psize;
int ssize;
@@ -97,7 +99,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (!batch->active) {
flush_hash_page(vaddr, rpte, psize, ssize, 0);
- put_cpu_var(ppc64_tlb_batch);
return;
}
@@ -126,7 +127,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
- put_cpu_var(ppc64_tlb_batch);
}
/*
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c b/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c
index a3d2ce54ea2e..187a7d32f86a 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
if (!IS_ERR(tmp)) {
struct nameidata nd;
- ret = kern_path_parent(tmp, &nd);
+ ret = path_lookup(tmp, LOOKUP_PARENT, &nd);
if (!ret) {
nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
ret = spufs_create(&nd, flags, mode, neighbor);
diff --git a/trunk/arch/powerpc/platforms/iseries/dt.c b/trunk/arch/powerpc/platforms/iseries/dt.c
index f0491cc28900..fdb7384c0c4f 100644
--- a/trunk/arch/powerpc/platforms/iseries/dt.c
+++ b/trunk/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
- for (i = 0; i < NR_LPPACAS; i++) {
- if (lppaca[i].dyn_proc_status >= 2)
+ for (i = 0; i < NR_CPUS; i++) {
+ if (lppaca_of(i).dyn_proc_status >= 2)
continue;
snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
dt_prop_str(dt, "device_type", device_type_cpu);
- index = lppaca[i].dyn_hv_phys_proc_index;
+ index = lppaca_of(i).dyn_hv_phys_proc_index;
d = &xIoHriProcessorVpd[index];
dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/trunk/arch/powerpc/platforms/iseries/setup.c b/trunk/arch/powerpc/platforms/iseries/setup.c
index 2946ae10fbfd..b0863410517f 100644
--- a/trunk/arch/powerpc/platforms/iseries/setup.c
+++ b/trunk/arch/powerpc/platforms/iseries/setup.c
@@ -680,7 +680,6 @@ void * __init iSeries_early_setup(void)
* on but calling this function multiple times is fine.
*/
identify_cpu(0, mfspr(SPRN_PVR));
- initialise_paca(&boot_paca, 0);
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/trunk/arch/s390/boot/compressed/misc.c b/trunk/arch/s390/boot/compressed/misc.c
index 2751b3a8a66f..0851eb1e919e 100644
--- a/trunk/arch/s390/boot/compressed/misc.c
+++ b/trunk/arch/s390/boot/compressed/misc.c
@@ -133,12 +133,11 @@ unsigned long decompress_kernel(void)
unsigned long output_addr;
unsigned char *output;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+ check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
memset(&_bss, 0, &_ebss - &_bss);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
#ifdef CONFIG_BLK_DEV_INITRD
/*
diff --git a/trunk/arch/s390/crypto/sha_common.c b/trunk/arch/s390/crypto/sha_common.c
index 48884f89ab92..f42dbabc0d30 100644
--- a/trunk/arch/s390/crypto/sha_common.c
+++ b/trunk/arch/s390/crypto/sha_common.c
@@ -38,7 +38,6 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
BUG_ON(ret != bsize);
data += bsize - index;
len -= bsize - index;
- index = 0;
}
/* process as many blocks as possible */
diff --git a/trunk/arch/s390/include/asm/atomic.h b/trunk/arch/s390/include/asm/atomic.h
index 5c5ba10384c2..76daea117181 100644
--- a/trunk/arch/s390/include/asm/atomic.h
+++ b/trunk/arch/s390/include/asm/atomic.h
@@ -36,19 +36,14 @@
static inline int atomic_read(const atomic_t *v)
{
- int c;
-
- asm volatile(
- " l %0,%1\n"
- : "=d" (c) : "Q" (v->counter));
- return c;
+ barrier();
+ return v->counter;
}
static inline void atomic_set(atomic_t *v, int i)
{
- asm volatile(
- " st %1,%0\n"
- : "=Q" (v->counter) : "d" (i));
+ v->counter = i;
+ barrier();
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -133,19 +128,14 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
static inline long long atomic64_read(const atomic64_t *v)
{
- long long c;
-
- asm volatile(
- " lg %0,%1\n"
- : "=d" (c) : "Q" (v->counter));
- return c;
+ barrier();
+ return v->counter;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
- asm volatile(
- " stg %1,%0\n"
- : "=Q" (v->counter) : "d" (i));
+ v->counter = i;
+ barrier();
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
diff --git a/trunk/arch/s390/include/asm/cache.h b/trunk/arch/s390/include/asm/cache.h
index 2a30d5ac0667..24aafa68b643 100644
--- a/trunk/arch/s390/include/asm/cache.h
+++ b/trunk/arch/s390/include/asm/cache.h
@@ -13,7 +13,6 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
-#define NET_SKB_PAD 32
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/trunk/arch/s390/include/asm/futex.h b/trunk/arch/s390/include/asm/futex.h
index 81cf36b691f1..5c5d02de49e9 100644
--- a/trunk/arch/s390/include/asm/futex.h
+++ b/trunk/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
#include
#include
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
+ int oldval, int newval)
{
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
+ return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
}
#endif /* __KERNEL__ */
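
Every futex_atomic_op_inuser() variant touched in this diff (powerpc and s390 above, sh, sparc, tile and x86 below) begins with the same unpacking of encoded_op into op, cmp, oparg and cmparg fields. A small standalone decoder in plain C, mirroring those shifts — illustration only, the struct and helper names are invented:

#include <stdio.h>

struct futex_op_fields {
        int op;         /* bits 31:28 (the top bit is the OPARG_SHIFT flag) */
        int cmp;        /* bits 27:24 */
        int oparg;      /* bits 23:12, sign-extended */
        int cmparg;     /* bits 11:0,  sign-extended */
};

static struct futex_op_fields decode(int encoded_op)
{
        struct futex_op_fields d;

        d.op  = (encoded_op >> 28) & 7;
        d.cmp = (encoded_op >> 24) & 15;
        /* The kernel writes these as (encoded_op << 8) >> 20 and relies on
         * gcc's arithmetic right shift; the unsigned cast keeps the left
         * shift well-defined in portable C and yields the same result. */
        d.oparg  = (int)((unsigned int)encoded_op << 8) >> 20;
        d.cmparg = (int)((unsigned int)encoded_op << 20) >> 20;
        return d;
}

int main(void)
{
        /* example packing: op=1, cmp=4, oparg=1, cmparg=0 */
        int encoded_op = (1 << 28) | (4 << 24) | (1 << 12) | 0;
        struct futex_op_fields d = decode(encoded_op);

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
               d.op, d.cmp, d.oparg, d.cmparg);
        return 0;
}
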
diff --git a/trunk/arch/s390/include/asm/processor.h b/trunk/arch/s390/include/asm/processor.h
index 2c79b6416271..bf3de04170a7 100644
--- a/trunk/arch/s390/include/asm/processor.h
+++ b/trunk/arch/s390/include/asm/processor.h
@@ -148,6 +148,11 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
*/
extern unsigned long thread_saved_pc(struct task_struct *t);
+/*
+ * Print register of task into buffer. Used in fs/proc/array.c.
+ */
+extern void task_show_regs(struct seq_file *m, struct task_struct *task);
+
extern void show_code(struct pt_regs *regs);
unsigned long get_wchan(struct task_struct *p);
diff --git a/trunk/arch/s390/include/asm/rwsem.h b/trunk/arch/s390/include/asm/rwsem.h
index d0eb4653cebd..423fdda2322d 100644
--- a/trunk/arch/s390/include/asm/rwsem.h
+++ b/trunk/arch/s390/include/asm/rwsem.h
@@ -43,6 +43,29 @@
#ifdef __KERNEL__
+#include
+#include
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
@@ -57,6 +80,41 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+/*
+ * initialisation
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+
/*
* lock for reading
*/
@@ -319,5 +377,10 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return new;
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
diff --git a/trunk/arch/s390/include/asm/uaccess.h b/trunk/arch/s390/include/asm/uaccess.h
index 2d9ea11f919a..d6b1ed0ec52b 100644
--- a/trunk/arch/s390/include/asm/uaccess.h
+++ b/trunk/arch/s390/include/asm/uaccess.h
@@ -83,8 +83,8 @@ struct uaccess_ops {
size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *);
size_t (*strncpy_from_user)(size_t, const char __user *, char *);
- int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
- int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
+ int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
+ int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
};
extern struct uaccess_ops uaccess;
diff --git a/trunk/arch/s390/kernel/traps.c b/trunk/arch/s390/kernel/traps.c
index b5a4a739b477..5eb78dd584ce 100644
--- a/trunk/arch/s390/kernel/traps.c
+++ b/trunk/arch/s390/kernel/traps.c
@@ -237,6 +237,43 @@ void show_regs(struct pt_regs *regs)
show_last_breaking_event(regs);
}
+/* This is called from fs/proc/array.c */
+void task_show_regs(struct seq_file *m, struct task_struct *task)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(task);
+ seq_printf(m, "task: %p, ksp: %p\n",
+ task, (void *)task->thread.ksp);
+ seq_printf(m, "User PSW : %p %p\n",
+ (void *) regs->psw.mask, (void *)regs->psw.addr);
+
+ seq_printf(m, "User GPRS: " FOURLONG,
+ regs->gprs[0], regs->gprs[1],
+ regs->gprs[2], regs->gprs[3]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[4], regs->gprs[5],
+ regs->gprs[6], regs->gprs[7]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[8], regs->gprs[9],
+ regs->gprs[10], regs->gprs[11]);
+ seq_printf(m, " " FOURLONG,
+ regs->gprs[12], regs->gprs[13],
+ regs->gprs[14], regs->gprs[15]);
+ seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
+ task->thread.acrs[0], task->thread.acrs[1],
+ task->thread.acrs[2], task->thread.acrs[3]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[4], task->thread.acrs[5],
+ task->thread.acrs[6], task->thread.acrs[7]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[8], task->thread.acrs[9],
+ task->thread.acrs[10], task->thread.acrs[11]);
+ seq_printf(m, " %08x %08x %08x %08x\n",
+ task->thread.acrs[12], task->thread.acrs[13],
+ task->thread.acrs[14], task->thread.acrs[15]);
+}
+
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
diff --git a/trunk/arch/s390/lib/uaccess.h b/trunk/arch/s390/lib/uaccess.h
index 1d2536cb630b..126011df14f1 100644
--- a/trunk/arch/s390/lib/uaccess.h
+++ b/trunk/arch/s390/lib/uaccess.h
@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
extern size_t copy_to_user_std(size_t, void __user *, const void *);
extern size_t strnlen_user_std(size_t, const char __user *);
extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
-extern int futex_atomic_op_std(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(int __user *, int, int);
+extern int futex_atomic_op_std(int, int __user *, int, int *);
extern size_t copy_from_user_pt(size_t, const void __user *, void *);
extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_pt(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c
index 74833831417f..404f2de296dc 100644
--- a/trunk/arch/s390/lib/uaccess_pt.c
+++ b/trunk/arch/s390/lib/uaccess_pt.c
@@ -302,7 +302,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc" );
-static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
int oldval = 0, newval, ret;
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return ret;
}
-int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
int ret;
@@ -354,29 +354,26 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return ret;
}
-static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
asm volatile("0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
+ "1: lr %0,%1\n"
"2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" );
- *uval = oldval;
return ret;
}
-int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
if (segment_eq(get_fs(), KERNEL_DS))
- return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+ return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
@@ -385,7 +382,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
- ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
+ ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
put_page(virt_to_page(uaddr));
return ret;
}
diff --git a/trunk/arch/s390/lib/uaccess_std.c b/trunk/arch/s390/lib/uaccess_std.c
index bb1a7eed42ce..a6c4f7ed24a4 100644
--- a/trunk/arch/s390/lib/uaccess_std.c
+++ b/trunk/arch/s390/lib/uaccess_std.c
@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
-int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
{
int oldval = 0, newval, ret;
@@ -287,21 +287,19 @@ int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
return ret;
}
-int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
{
int ret;
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
+ "1: lr %0,%1\n"
"2: sacf 0\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" );
- *uval = oldval;
return ret;
}
diff --git a/trunk/arch/sh/include/asm/futex-irq.h b/trunk/arch/sh/include/asm/futex-irq.h
index 6cb9f193a95e..a9f16a7f9aea 100644
--- a/trunk/arch/sh/include/asm/futex-irq.h
+++ b/trunk/arch/sh/include/asm/futex-irq.h
@@ -3,7 +3,7 @@
#include
-static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
+static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
return ret;
}
-static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
+static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
return ret;
}
-static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
+static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
return ret;
}
-static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
+static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
return ret;
}
-static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
+static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
int *oldval)
{
unsigned long flags;
@@ -88,13 +88,11 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
return ret;
}
-static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
- u32 __user *uaddr,
- u32 oldval, u32 newval)
+static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
+ int oldval, int newval)
{
unsigned long flags;
- int ret;
- u32 prev = 0;
+ int ret, prev = 0;
local_irq_save(flags);
@@ -104,8 +102,10 @@ static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
local_irq_restore(flags);
- *uval = prev;
- return ret;
+ if (ret)
+ return ret;
+
+ return prev;
}
#endif /* __ASM_SH_FUTEX_IRQ_H */
diff --git a/trunk/arch/sh/include/asm/futex.h b/trunk/arch/sh/include/asm/futex.h
index 7be39a646fbd..68256ec5fa35 100644
--- a/trunk/arch/sh/include/asm/futex.h
+++ b/trunk/arch/sh/include/asm/futex.h
@@ -10,7 +10,7 @@
/* XXX: UP variants, fix for SH-4A and SMP.. */
#include
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -65,13 +65,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+ return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval);
}
#endif /* __KERNEL__ */
diff --git a/trunk/arch/sh/include/asm/rwsem.h b/trunk/arch/sh/include/asm/rwsem.h
index edab57265293..06e2251a5e48 100644
--- a/trunk/arch/sh/include/asm/rwsem.h
+++ b/trunk/arch/sh/include/asm/rwsem.h
@@ -11,13 +11,64 @@
#endif
#ifdef __KERNEL__
+#include
+#include
+#include
+#include
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
/*
* lock for reading
@@ -128,5 +179,10 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */
diff --git a/trunk/arch/sh/include/asm/sections.h b/trunk/arch/sh/include/asm/sections.h
index 4a5350037c8f..a78701da775b 100644
--- a/trunk/arch/sh/include/asm/sections.h
+++ b/trunk/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
#include
-extern long __nosave_begin, __nosave_end;
+extern void __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char _ebss[];
diff --git a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index e53b4b38bd11..672944f5b19c 100644
--- a/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/trunk/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,7 +14,7 @@
#include
#include
#include
-#include
+#include
static struct resource rtc_resources[] = {
[0] = {
@@ -255,17 +255,12 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- struct platform_device *dev[1];
-
if (mach_is_rts7751r2d()) {
scif_platform_data.scscr |= SCSCR_CKE1;
- dev[0] = &scif_device;
- early_platform_add_devices(dev, 1);
+ early_platform_add_devices(&scif_device, 1);
} else {
- dev[0] = &sci_device;
- early_platform_add_devices(dev, 1);
- dev[0] = &scif_device;
- early_platform_add_devices(dev, 1);
+ early_platform_add_devices(&sci_device, 1);
+ early_platform_add_devices(&scif_device, 1);
}
early_platform_add_devices(sh7750_early_devices,
diff --git a/trunk/arch/sh/lib/delay.c b/trunk/arch/sh/lib/delay.c
index 0901b2f14e15..faa8f86c0db4 100644
--- a/trunk/arch/sh/lib/delay.c
+++ b/trunk/arch/sh/lib/delay.c
@@ -10,16 +10,6 @@
void __delay(unsigned long loops)
{
__asm__ __volatile__(
- /*
- * ST40-300 appears to have an issue with this code,
- * normally taking two cycles each loop, as with all
- * other SH variants. If however the branch and the
- * delay slot straddle an 8 byte boundary, this increases
- * to 3 cycles.
- * This align directive ensures this doesn't occur.
- */
- ".balign 8\n\t"
-
"tst %0, %0\n\t"
"1:\t"
"bf/s 1b\n\t"
diff --git a/trunk/arch/sh/mm/cache.c b/trunk/arch/sh/mm/cache.c
index 5a580ea04429..88d3dc3d30d5 100644
--- a/trunk/arch/sh/mm/cache.c
+++ b/trunk/arch/sh/mm/cache.c
@@ -108,8 +108,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kunmap_atomic(vfrom, KM_USER0);
}
- if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
- (vma->vm_flags & VM_EXEC))
+ if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
__flush_purge_region(vto, PAGE_SIZE);
kunmap_atomic(vto, KM_USER1);
diff --git a/trunk/arch/sparc/include/asm/futex_64.h b/trunk/arch/sparc/include/asm/futex_64.h
index 444e7bea23bc..47f95839dc69 100644
--- a/trunk/arch/sparc/include/asm/futex_64.h
+++ b/trunk/arch/sparc/include/asm/futex_64.h
@@ -30,7 +30,7 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+ if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
@@ -85,30 +85,26 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
- int ret = 0;
-
__asm__ __volatile__(
- "\n1: casa [%4] %%asi, %3, %1\n"
+ "\n1: casa [%3] %%asi, %2, %0\n"
"2:\n"
" .section .fixup,#alloc,#execinstr\n"
" .align 4\n"
"3: sethi %%hi(2b), %0\n"
" jmpl %0 + %%lo(2b), %%g0\n"
- " mov %5, %0\n"
+ " mov %4, %0\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
" .align 4\n"
" .word 1b, 3b\n"
" .previous\n"
- : "+r" (ret), "=r" (newval)
- : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
+ : "=r" (newval)
+ : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
: "memory");
- *uval = newval;
- return ret;
+ return newval;
}
#endif /* !(_SPARC64_FUTEX_H) */
diff --git a/trunk/arch/sparc/include/asm/pcr.h b/trunk/arch/sparc/include/asm/pcr.h
index 843e4faf6a50..a2f5c61f924e 100644
--- a/trunk/arch/sparc/include/asm/pcr.h
+++ b/trunk/arch/sparc/include/asm/pcr.h
@@ -43,6 +43,4 @@ static inline u64 picl_value(unsigned int nmi_hz)
extern u64 pcr_enable;
-extern int pcr_arch_init(void);
-
#endif /* __PCR_H */
diff --git a/trunk/arch/sparc/include/asm/rwsem.h b/trunk/arch/sparc/include/asm/rwsem.h
index 069bf4d663a1..a2b4302869bc 100644
--- a/trunk/arch/sparc/include/asm/rwsem.h
+++ b/trunk/arch/sparc/include/asm/rwsem.h
@@ -13,12 +13,53 @@
#ifdef __KERNEL__
+#include
+#include
+
+struct rwsem_waiter;
+
+struct rw_semaphore {
+ signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000L
#define RWSEM_ACTIVE_BIAS 0x00000001L
#define RWSEM_ACTIVE_MASK 0xffffffffL
#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
/*
* lock for reading
@@ -119,6 +160,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */
diff --git a/trunk/arch/sparc/kernel/iommu.c b/trunk/arch/sparc/kernel/iommu.c
index 72509d0e34be..47977a77f6c6 100644
--- a/trunk/arch/sparc/kernel/iommu.c
+++ b/trunk/arch/sparc/kernel/iommu.c
@@ -255,9 +255,10 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
- int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
+ int sz = IOMMU_NUM_CTXS - lowest;
+ int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
- if (unlikely(n == IOMMU_NUM_CTXS)) {
+ if (unlikely(n == sz)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
diff --git a/trunk/arch/sparc/kernel/pcic.c b/trunk/arch/sparc/kernel/pcic.c
index 2cdc131b50ac..aeaa09a3c655 100644
--- a/trunk/arch/sparc/kernel/pcic.c
+++ b/trunk/arch/sparc/kernel/pcic.c
@@ -700,8 +700,10 @@ static void pcic_clear_clock_irq(void)
static irqreturn_t pcic_timer_handler (int irq, void *h)
{
+ write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
- xtime_update(1);
+ do_timer(1);
+ write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
diff --git a/trunk/arch/sparc/kernel/pcr.c b/trunk/arch/sparc/kernel/pcr.c
index 7c2ced612b8f..ae96cf52a955 100644
--- a/trunk/arch/sparc/kernel/pcr.c
+++ b/trunk/arch/sparc/kernel/pcr.c
@@ -167,3 +167,5 @@ int __init pcr_arch_init(void)
unregister_perf_hsvc();
return err;
}
+
+early_initcall(pcr_arch_init);
diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c
index 555a76d1f4a1..b6a2b8f47040 100644
--- a/trunk/arch/sparc/kernel/smp_64.c
+++ b/trunk/arch/sparc/kernel/smp_64.c
@@ -49,7 +49,6 @@
#include
#include
#include
-#include
#include "cpumap.h"
@@ -1359,7 +1358,6 @@ void __cpu_die(unsigned int cpu)
void __init smp_cpus_done(unsigned int max_cpus)
{
- pcr_arch_init();
}
void smp_send_reschedule(int cpu)
diff --git a/trunk/arch/sparc/kernel/time_32.c b/trunk/arch/sparc/kernel/time_32.c
index 4211bfc9bcad..9c743b1886ff 100644
--- a/trunk/arch/sparc/kernel/time_32.c
+++ b/trunk/arch/sparc/kernel/time_32.c
@@ -85,7 +85,7 @@ int update_persistent_clock(struct timespec now)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * as well as call the "do_timer()" routine every clocktick
*/
#define TICK_SIZE (tick_nsec / 1000)
@@ -96,9 +96,14 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
profile_tick(CPU_PROFILING);
#endif
+ /* Protect counter clear so that do_gettimeoffset works */
+ write_seqlock(&xtime_lock);
+
clear_clock_irq();
- xtime_update(1);
+ do_timer(1);
+
+ write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
diff --git a/trunk/arch/sparc/kernel/una_asm_32.S b/trunk/arch/sparc/kernel/una_asm_32.S
index 8f096e84a937..8cc03458eb7e 100644
--- a/trunk/arch/sparc/kernel/una_asm_32.S
+++ b/trunk/arch/sparc/kernel/una_asm_32.S
@@ -24,9 +24,9 @@ retl_efault:
.globl __do_int_store
__do_int_store:
ld [%o2], %g1
- cmp %o1, 2
+ cmp %1, 2
be 2f
- cmp %o1, 4
+ cmp %1, 4
be 1f
srl %g1, 24, %g2
srl %g1, 16, %g7
diff --git a/trunk/arch/sparc/lib/atomic32.c b/trunk/arch/sparc/lib/atomic32.c
index d3c7a12ad879..cbddeb38ffda 100644
--- a/trunk/arch/sparc/lib/atomic32.c
+++ b/trunk/arch/sparc/lib/atomic32.c
@@ -16,7 +16,7 @@
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
+ [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};
#else /* SMP */
diff --git a/trunk/arch/sparc/lib/bitext.c b/trunk/arch/sparc/lib/bitext.c
index 48d00e72ce15..764b3eb7b604 100644
--- a/trunk/arch/sparc/lib/bitext.c
+++ b/trunk/arch/sparc/lib/bitext.c
@@ -10,7 +10,7 @@
*/
#include
-#include
+#include
#include
@@ -80,7 +80,8 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
while (test_bit(offset + i, t->map) == 0) {
i++;
if (i == len) {
- bitmap_set(t->map, offset, len);
+ for (i = 0; i < len; i++)
+ __set_bit(offset + i, t->map);
if (offset == t->first_free)
t->first_free = find_next_zero_bit
(t->map, t->size,
diff --git a/trunk/arch/tile/include/asm/futex.h b/trunk/arch/tile/include/asm/futex.h
index d03ec124a598..fe0d10dcae57 100644
--- a/trunk/arch/tile/include/asm/futex.h
+++ b/trunk/arch/tile/include/asm/futex.h
@@ -29,16 +29,16 @@
#include
#include
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+extern struct __get_user futex_set(int __user *v, int i);
+extern struct __get_user futex_add(int __user *v, int n);
+extern struct __get_user futex_or(int __user *v, int n);
+extern struct __get_user futex_andn(int __user *v, int n);
+extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
+extern struct __get_user futex_xor(int __user *v, int n);
#else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
+static inline struct __get_user futex_xor(int __user *uaddr, int n)
{
struct __get_user asm_ret = __get_user_4(uaddr);
if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
}
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
pagefault_disable();
@@ -119,17 +119,16 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+ int newval)
{
struct __get_user asm_ret;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
asm_ret = futex_cmpxchg(uaddr, oldval, newval);
- *uval = asm_ret.val;
- return asm_ret.err;
+ return asm_ret.err ? asm_ret.err : asm_ret.val;
}
#ifndef __tilegx__
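
Operations like the futex_xor() fallback in the tile hunk above are typically built as a compare-and-swap retry loop: read the word, compute old ^ n, and cmpxchg until no other writer intervenes. A userspace rendering of that pattern with C11 atomics (illustrative only; this is not the tile kernel code and the helper name is made up):

#include <stdatomic.h>
#include <stdio.h>

/* XOR 'n' into *uaddr atomically and return the previous value,
 * retrying whenever another thread modifies the word mid-update. */
static int atomic_xor_fetch_prev(atomic_int *uaddr, int n)
{
        int oldval = atomic_load(uaddr);

        while (!atomic_compare_exchange_weak(uaddr, &oldval, oldval ^ n))
                ;       /* oldval was refreshed by the failed CAS; retry */
        return oldval;
}

int main(void)
{
        atomic_int v = 0x0f;
        int prev = atomic_xor_fetch_prev(&v, 0xff);

        printf("prev=%#x now=%#x\n", prev, atomic_load(&v));
        return 0;
}
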
diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c
index c70e047eed72..975613b23dcf 100644
--- a/trunk/arch/um/drivers/mconsole_kern.c
+++ b/trunk/arch/um/drivers/mconsole_kern.c
@@ -124,18 +124,35 @@ void mconsole_log(struct mc_request *req)
#if 0
void mconsole_proc(struct mc_request *req)
{
+ struct nameidata nd;
struct vfsmount *mnt = current->nsproxy->pid_ns->proc_mnt;
struct file *file;
- int n;
+ int n, err;
char *ptr = req->request.data, *buf;
mm_segment_t old_fs = get_fs();
ptr += strlen("proc");
ptr = skip_spaces(ptr);
- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
+ err = vfs_path_lookup(mnt->mnt_root, mnt, ptr, LOOKUP_FOLLOW, &nd);
+ if (err) {
+ mconsole_reply(req, "Failed to look up file", 1, 0);
+ goto out;
+ }
+
+ err = may_open(&nd.path, MAY_READ, O_RDONLY);
+ if (result) {
+ mconsole_reply(req, "Failed to open file", 1, 0);
+ path_put(&nd.path);
+ goto out;
+ }
+
+ file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY,
+ current_cred());
+ err = PTR_ERR(file);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
+ path_put(&nd.path);
goto out;
}
diff --git a/trunk/arch/um/drivers/ubd_kern.c b/trunk/arch/um/drivers/ubd_kern.c
index 620f5b70957d..ba4a98ba39c0 100644
--- a/trunk/arch/um/drivers/ubd_kern.c
+++ b/trunk/arch/um/drivers/ubd_kern.c
@@ -185,7 +185,7 @@ struct ubd {
.no_cow = 0, \
.shared = 0, \
.cow = DEFAULT_COW, \
- .lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
+ .lock = SPIN_LOCK_UNLOCKED, \
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
diff --git a/trunk/arch/x86/boot/compressed/mkpiggy.c b/trunk/arch/x86/boot/compressed/mkpiggy.c
index 46a823882437..646aa78ba5fd 100644
--- a/trunk/arch/x86/boot/compressed/mkpiggy.c
+++ b/trunk/arch/x86/boot/compressed/mkpiggy.c
@@ -62,12 +62,7 @@ int main(int argc, char *argv[])
if (fseek(f, -4L, SEEK_END)) {
perror(argv[1]);
}
-
- if (fread(&olen, sizeof(olen), 1, f) != 1) {
- perror(argv[1]);
- return 1;
- }
-
+ fread(&olen, sizeof olen, 1, f);
ilen = ftell(f);
olen = getle32(&olen);
fclose(f);
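
The mkpiggy.c hunk above restores an unchecked fread() of the 4-byte length trailer. For comparison, a defensive standalone reader of such a little-endian trailer — generic C, not the kernel's mkpiggy tool; the function and variable names are invented for the example:

#include <stdio.h>
#include <stdint.h>

/* Read the last 4 bytes of a file as a little-endian length field,
 * reporting failure instead of silently using uninitialized data. */
static int read_le32_trailer(FILE *f, uint32_t *out)
{
        unsigned char buf[4];

        if (fseek(f, -4L, SEEK_END) != 0)
                return -1;
        if (fread(buf, 1, sizeof(buf), f) != sizeof(buf))
                return -1;
        *out = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
               ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
        return 0;
}

int main(int argc, char *argv[])
{
        FILE *f;
        uint32_t olen;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        f = fopen(argv[1], "rb");
        if (!f) {
                perror(argv[1]);
                return 1;
        }
        if (read_le32_trailer(f, &olen) == 0)
                printf("trailing length field: %u\n", (unsigned)olen);
        else
                fprintf(stderr, "%s: short or unreadable trailer\n", argv[1]);
        fclose(f);
        return 0;
}
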
diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S
index 2f0d7b450150..518bb99c3394 100644
--- a/trunk/arch/x86/ia32/ia32entry.S
+++ b/trunk/arch/x86/ia32/ia32entry.S
@@ -25,8 +25,6 @@
#define sysretl_audit ia32_ret_from_sys_call
#endif
- .section .entry.text, "ax"
-
#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
.macro IA32_ARG_FIXUP noebp=0
@@ -853,7 +851,4 @@ ia32_sys_call_table:
.quad sys_fanotify_init
.quad sys32_fanotify_mark
.quad sys_prlimit64 /* 340 */
- .quad sys_name_to_handle_at
- .quad compat_sys_open_by_handle_at
- .quad compat_sys_clock_adjtime
ia32_syscall_end:
diff --git a/trunk/arch/x86/include/asm/acpi.h b/trunk/arch/x86/include/asm/acpi.h
index 4ea15ca89b2b..211ca3f7fd16 100644
--- a/trunk/arch/x86/include/asm/acpi.h
+++ b/trunk/arch/x86/include/asm/acpi.h
@@ -88,7 +88,6 @@ extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
-extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
diff --git a/trunk/arch/x86/include/asm/apic.h b/trunk/arch/x86/include/asm/apic.h
index 3c896946f4cc..5e3969c36d7f 100644
--- a/trunk/arch/x86/include/asm/apic.h
+++ b/trunk/arch/x86/include/asm/apic.h
@@ -233,7 +233,6 @@ extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void end_local_APIC_setup(void);
-extern void bsp_end_local_APIC_setup(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
diff --git a/trunk/arch/x86/include/asm/ce4100.h b/trunk/arch/x86/include/asm/ce4100.h
deleted file mode 100644
index e656ad8c0a2e..000000000000
--- a/trunk/arch/x86/include/asm/ce4100.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_CE4100_H_
-#define _ASM_CE4100_H_
-
-int ce4100_pci_init(void);
-
-#endif
diff --git a/trunk/arch/x86/include/asm/cpu.h b/trunk/arch/x86/include/asm/cpu.h
index 4564c8e28a33..6e6e7558e702 100644
--- a/trunk/arch/x86/include/asm/cpu.h
+++ b/trunk/arch/x86/include/asm/cpu.h
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int);
DECLARE_PER_CPU(int, cpu_state);
-int mwait_usable(const struct cpuinfo_x86 *);
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *);
#endif /* _ASM_X86_CPU_H */
diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h
index 91f3e087cf21..220e2ea08e80 100644
--- a/trunk/arch/x86/include/asm/cpufeature.h
+++ b/trunk/arch/x86/include/asm/cpufeature.h
@@ -160,7 +160,6 @@
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -280,7 +279,6 @@ extern const char * const x86_power_flags[32];
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
diff --git a/trunk/arch/x86/include/asm/futex.h b/trunk/arch/x86/include/asm/futex.h
index d09bb03653f0..1f11ce44e956 100644
--- a/trunk/arch/x86/include/asm/futex.h
+++ b/trunk/arch/x86/include/asm/futex.h
@@ -37,7 +37,7 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,10 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+ int newval)
{
- int ret = 0;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
/* Real i386 machines have no cmpxchg instruction */
@@ -120,22 +119,21 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -ENOSYS;
#endif
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+ asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
"2:\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
+ "3:\tmov %2, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
_ASM_EXTABLE(1b, 3b)
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
);
- *uval = oldval;
- return ret;
+ return oldval;
}
#endif
diff --git a/trunk/arch/x86/include/asm/kdebug.h b/trunk/arch/x86/include/asm/kdebug.h
index 518bbbb9ee59..ca242d35e873 100644
--- a/trunk/arch/x86/include/asm/kdebug.h
+++ b/trunk/arch/x86/include/asm/kdebug.h
@@ -13,6 +13,7 @@ enum die_val {
DIE_PANIC,
DIE_NMI,
DIE_DIE,
+ DIE_NMIWATCHDOG,
DIE_KERNELDEBUG,
DIE_TRAP,
DIE_GPF,
diff --git a/trunk/arch/x86/include/asm/msr-index.h b/trunk/arch/x86/include/asm/msr-index.h
index 823d48223400..4d0dfa0d998e 100644
--- a/trunk/arch/x86/include/asm/msr-index.h
+++ b/trunk/arch/x86/include/asm/msr-index.h
@@ -36,11 +36,6 @@
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
-#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
-#define NHM_C3_AUTO_DEMOTE (1UL << 25)
-#define NHM_C1_AUTO_DEMOTE (1UL << 26)
-#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
-
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
@@ -52,9 +47,6 @@
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b
-#define MSR_OFFCORE_RSP_0 0x000001a6
-#define MSR_OFFCORE_RSP_1 0x000001a7
-
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h
index 07f46016d3ff..c76f5b92b840 100644
--- a/trunk/arch/x86/include/asm/nmi.h
+++ b/trunk/arch/x86/include/asm/nmi.h
@@ -7,6 +7,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
diff --git a/trunk/arch/x86/include/asm/perf_event_p4.h b/trunk/arch/x86/include/asm/perf_event_p4.h
index cc29086e30cd..e2f6a99f14ab 100644
--- a/trunk/arch/x86/include/asm/perf_event_p4.h
+++ b/trunk/arch/x86/include/asm/perf_event_p4.h
@@ -22,7 +22,6 @@
#define ARCH_P4_CNTRVAL_BITS (40)
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
-#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
#define P4_ESCR_EVENT_MASK 0x7e000000U
#define P4_ESCR_EVENT_SHIFT 25
diff --git a/trunk/arch/x86/include/asm/rwsem.h b/trunk/arch/x86/include/asm/rwsem.h
index df4cd32b4cc6..d1e41b0f9b60 100644
--- a/trunk/arch/x86/include/asm/rwsem.h
+++ b/trunk/arch/x86/include/asm/rwsem.h
@@ -37,9 +37,26 @@
#endif
#ifdef __KERNEL__
+
+#include
+#include
+#include
#include
+struct rwsem_waiter;
+
+extern asmregparm struct rw_semaphore *
+ rwsem_down_read_failed(struct rw_semaphore *sem);
+extern asmregparm struct rw_semaphore *
+ rwsem_down_write_failed(struct rw_semaphore *sem);
+extern asmregparm struct rw_semaphore *
+ rwsem_wake(struct rw_semaphore *);
+extern asmregparm struct rw_semaphore *
+ rwsem_downgrade_wake(struct rw_semaphore *sem);
+
/*
+ * the semaphore definition
+ *
* The bias values and the counter type limits the number of
* potential readers/writers to 32767 for 32 bits and 2147483647
* for 64 bits.
@@ -57,6 +74,43 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+typedef signed long rwsem_count_t;
+
+struct rw_semaphore {
+ rwsem_count_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+
+#define __RWSEM_INITIALIZER(name) \
+{ \
+ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
+}
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
/*
* lock for reading
*/
@@ -79,7 +133,7 @@ static inline void __down_read(struct rw_semaphore *sem)
*/
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
- long result, tmp;
+ rwsem_count_t result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
" mov %0,%1\n\t"
"1:\n\t"
@@ -101,7 +155,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
*/
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
- long tmp;
+ rwsem_count_t tmp;
asm volatile("# beginning down_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* adds 0xffff0001, returns the old value */
@@ -126,8 +180,9 @@ static inline void __down_write(struct rw_semaphore *sem)
*/
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
+ rwsem_count_t ret = cmpxchg(&sem->count,
+ RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
if (ret == RWSEM_UNLOCKED_VALUE)
return 1;
return 0;
@@ -138,7 +193,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- long tmp;
+ rwsem_count_t tmp;
asm volatile("# beginning __up_read\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 1, returns the old value */
@@ -156,7 +211,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- long tmp;
+ rwsem_count_t tmp;
asm volatile("# beginning __up_write\n\t"
LOCK_PREFIX " xadd %1,(%2)\n\t"
/* subtracts 0xffff0001, returns the old value */
@@ -192,7 +247,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
/*
* implement atomic add functionality
*/
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+ struct rw_semaphore *sem)
{
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
: "+m" (sem->count)
@@ -202,9 +258,10 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
/*
* implement exchange and add functionality
*/
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+ struct rw_semaphore *sem)
{
- long tmp = delta;
+ rwsem_count_t tmp = delta;
asm volatile(LOCK_PREFIX "xadd %0,%1"
: "+r" (tmp), "+m" (sem->count)
@@ -213,5 +270,10 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
return tmp + delta;
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */
diff --git a/trunk/arch/x86/include/asm/smp.h b/trunk/arch/x86/include/asm/smp.h
index c1bbfa89a0e2..1f4695136776 100644
--- a/trunk/arch/x86/include/asm/smp.h
+++ b/trunk/arch/x86/include/asm/smp.h
@@ -17,20 +17,10 @@
#endif
#include
#include
-#include
extern int smp_num_siblings;
extern unsigned int num_processors;
-static inline bool cpu_has_ht_siblings(void)
-{
- bool has_siblings = false;
-#ifdef CONFIG_SMP
- has_siblings = cpu_has_ht && smp_num_siblings > 1;
-#endif
- return has_siblings;
-}
-
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
diff --git a/trunk/arch/x86/include/asm/smpboot_hooks.h b/trunk/arch/x86/include/asm/smpboot_hooks.h
index 725b77831993..6c22bf353f26 100644
--- a/trunk/arch/x86/include/asm/smpboot_hooks.h
+++ b/trunk/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
- *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+ *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
diff --git a/trunk/arch/x86/include/asm/unistd_32.h b/trunk/arch/x86/include/asm/unistd_32.h
index ffaf183c619a..b766a5e8ba0e 100644
--- a/trunk/arch/x86/include/asm/unistd_32.h
+++ b/trunk/arch/x86/include/asm/unistd_32.h
@@ -346,13 +346,10 @@
#define __NR_fanotify_init 338
#define __NR_fanotify_mark 339
#define __NR_prlimit64 340
-#define __NR_name_to_handle_at 341
-#define __NR_open_by_handle_at 342
-#define __NR_clock_adjtime 343
#ifdef __KERNEL__
-#define NR_syscalls 344
+#define NR_syscalls 341
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/trunk/arch/x86/include/asm/unistd_64.h b/trunk/arch/x86/include/asm/unistd_64.h
index 5466bea670e7..363e9b8a715b 100644
--- a/trunk/arch/x86/include/asm/unistd_64.h
+++ b/trunk/arch/x86/include/asm/unistd_64.h
@@ -669,12 +669,6 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_prlimit64 302
__SYSCALL(__NR_prlimit64, sys_prlimit64)
-#define __NR_name_to_handle_at 303
-__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
-#define __NR_open_by_handle_at 304
-__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
-#define __NR_clock_adjtime 305
-__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h
index 3e094af443c3..ce1d54c8a433 100644
--- a/trunk/arch/x86/include/asm/uv/uv_bau.h
+++ b/trunk/arch/x86/include/asm/uv/uv_bau.h
@@ -176,7 +176,7 @@ struct bau_msg_payload {
struct bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nodeid:15; /* nasid of the */
+ unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
/* bits 20:6 */ /* first bit in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
diff --git a/trunk/arch/x86/include/asm/xen/hypercall.h b/trunk/arch/x86/include/asm/xen/hypercall.h
index 8508bfe52296..a3c28ae4025b 100644
--- a/trunk/arch/x86/include/asm/xen/hypercall.h
+++ b/trunk/arch/x86/include/asm/xen/hypercall.h
@@ -287,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
- return _hypercall2(int, sched_op, cmd, arg);
+ return _hypercall2(int, sched_op_new, cmd, arg);
}
static inline long
@@ -422,17 +422,10 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
#endif
static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
+HYPERVISOR_suspend(unsigned long srec)
{
- struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
- /*
- * For a PV guest the tools require that the start_info mfn be
- * present in rdx/edx when the hypercall is made. Per the
- * hypercall calling convention this is the third hypercall
- * argument, which is start_info_mfn here.
- */
- return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+ SHUTDOWN_suspend, srec);
}
static inline int
diff --git a/trunk/arch/x86/include/asm/xen/page.h b/trunk/arch/x86/include/asm/xen/page.h
index c61934fbf22a..f25bdf238a33 100644
--- a/trunk/arch/x86/include/asm/xen/page.h
+++ b/trunk/arch/x86/include/asm/xen/page.h
@@ -29,10 +29,8 @@ typedef struct xpaddr {
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
-#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
-#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
+#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)
/* Maximum amount of memory we can handle in a domain in pages */
#define MAX_DOMAIN_PAGES \
@@ -43,18 +41,12 @@ extern unsigned int machine_to_phys_order;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern unsigned long set_phys_range_identity(unsigned long pfn_s,
- unsigned long pfn_e);
extern int m2p_add_override(unsigned long mfn, struct page *page);
extern int m2p_remove_override(struct page *page);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
-#ifdef CONFIG_XEN_DEBUG_FS
-extern int p2m_dump_show(struct seq_file *m, void *v);
-#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
@@ -65,7 +57,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
mfn = get_phys_to_machine(pfn);
if (mfn != INVALID_P2M_ENTRY)
- mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
+ mfn &= ~FOREIGN_FRAME_BIT;
return mfn;
}
@@ -81,44 +73,25 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
- int ret = 0;
if (xen_feature(XENFEAT_auto_translated_physmap))
return mfn;
- if (unlikely((mfn >> machine_to_phys_order) != 0)) {
- pfn = ~0;
- goto try_override;
- }
pfn = 0;
/*
* The array access can fail (e.g., device space beyond end of RAM).
* In such cases it doesn't matter what we return (we return garbage),
* but we must handle the fault without crashing!
*/
- ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
-try_override:
- /* ret might be < 0 if there are no entries in the m2p for mfn */
- if (ret < 0)
- pfn = ~0;
- else if (get_phys_to_machine(pfn) != mfn)
- /*
- * If this appears to be a foreign mfn (because the pfn
- * doesn't map back to the mfn), then check the local override
- * table to see if there's a better pfn to use.
- *
- * m2p_find_override_pfn returns ~0 if it doesn't find anything.
- */
- pfn = m2p_find_override_pfn(mfn, ~0);
-
- /*
- * pfn is ~0 if there are no entries in the m2p for mfn or if the
- * entry doesn't map back to the mfn and m2p_override doesn't have a
- * valid entry for it.
+ __get_user(pfn, &machine_to_phys_mapping[mfn]);
+
+ /*
+ * If this appears to be a foreign mfn (because the pfn
+ * doesn't map back to the mfn), then check the local override
+ * table to see if there's a better pfn to use.
*/
- if (pfn == ~0 &&
- get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
- pfn = mfn;
+ if (get_phys_to_machine(pfn) != mfn)
+ pfn = m2p_find_override_pfn(mfn, pfn);
return pfn;
}
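The simplified mfn_to_pfn() restored above decides whether a machine frame is foreign with a round-trip consistency check between the two translation tables. A minimal userspace sketch of that idea, with made-up table contents and hypothetical array names standing in for get_phys_to_machine() and machine_to_phys_mapping[] (illustration only, not part of the patch):

/*
 * Sketch only -- not kernel code.  p2m[] and m2p[] stand in for
 * get_phys_to_machine() and machine_to_phys_mapping[] from the hunk
 * above; the values are made up to show the round-trip test.
 */
#include <stdio.h>

int main(void)
{
        /* pseudo-physical -> machine, and machine -> pseudo-physical */
        unsigned long p2m[4] = { 7, 9, 11, 13 };
        unsigned long m2p[16] = { 0 };
        unsigned long mfn;

        m2p[7] = 0;  m2p[9] = 1;  m2p[11] = 2;  /* frames this domain owns */
        m2p[13] = 3;                            /* pretend 13 was granted away: */
        p2m[3]  = 99;                           /* ...our p2m no longer maps it  */

        for (mfn = 7; mfn < 14; mfn += 2) {
                unsigned long pfn = m2p[mfn];
                int foreign = (p2m[pfn] != mfn);  /* the check in mfn_to_pfn() */

                printf("mfn %lu -> pfn %lu%s\n", mfn, pfn,
                       foreign ? " (foreign, consult m2p_override)" : "");
        }
        return 0;
}

When the round trip fails, the kernel code above falls back to m2p_find_override_pfn() instead of trusting the stale pfn.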
diff --git a/trunk/arch/x86/include/asm/xen/pci.h b/trunk/arch/x86/include/asm/xen/pci.h
index aa8620989162..2329b3eaf8d3 100644
--- a/trunk/arch/x86/include/asm/xen/pci.h
+++ b/trunk/arch/x86/include/asm/xen/pci.h
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
* its own functions.
*/
struct xen_pci_frontend_ops {
- int (*enable_msi)(struct pci_dev *dev, int vectors[]);
+ int (*enable_msi)(struct pci_dev *dev, int **vectors);
void (*disable_msi)(struct pci_dev *dev);
- int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
+ int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
void (*disable_msix)(struct pci_dev *dev);
};
extern struct xen_pci_frontend_ops *xen_pci_frontend;
static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
- int vectors[])
+ int **vectors)
{
if (xen_pci_frontend && xen_pci_frontend->enable_msi)
return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
xen_pci_frontend->disable_msi(dev);
}
static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
- int vectors[], int nvec)
+ int **vectors, int nvec)
{
if (xen_pci_frontend && xen_pci_frontend->enable_msix)
return xen_pci_frontend->enable_msix(dev, vectors, nvec);
diff --git a/trunk/arch/x86/kernel/acpi/boot.c b/trunk/arch/x86/kernel/acpi/boot.c
index 3e6e2d68f761..b3a71137983a 100644
--- a/trunk/arch/x86/kernel/acpi/boot.c
+++ b/trunk/arch/x86/kernel/acpi/boot.c
@@ -72,7 +72,6 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
-int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -416,15 +415,10 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
- if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
- return 0;
- }
- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
- intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
- }
+ if (acpi_skip_timer_override &&
+ intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ return 0;
}
mp_override_legacy_irq(intsrc->source_irq,
diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c
index 7038b95d363f..123608531c8f 100644
--- a/trunk/arch/x86/kernel/alternative.c
+++ b/trunk/arch/x86/kernel/alternative.c
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
atomic_set(&stop_machine_first, 1);
wrote_text = 0;
- __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
}
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
diff --git a/trunk/arch/x86/kernel/apb_timer.c b/trunk/arch/x86/kernel/apb_timer.c
index 51d4e1663066..51ef31a89be9 100644
--- a/trunk/arch/x86/kernel/apb_timer.c
+++ b/trunk/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
- adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
+ apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
global_clock_event = &adev->evt;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c
index 76b96d74978a..06c196d7e59c 100644
--- a/trunk/arch/x86/kernel/apic/apic.c
+++ b/trunk/arch/x86/kernel/apic/apic.c
@@ -1381,17 +1381,12 @@ void __cpuinit end_local_APIC_setup(void)
#endif
apic_pm_activate();
-}
-
-void __init bsp_end_local_APIC_setup(void)
-{
- end_local_APIC_setup();
/*
* Now that local APIC setup is completed for BP, configure the fault
* handling for interrupt remapping.
*/
- if (intr_remapping_enabled)
+ if (!smp_processor_id() && intr_remapping_enabled)
enable_drhd_fault_handling();
}
@@ -1761,7 +1756,7 @@ int __init APIC_init_uniprocessor(void)
enable_IO_APIC();
#endif
- bsp_end_local_APIC_setup();
+ end_local_APIC_setup();
#ifdef CONFIG_X86_IO_APIC
if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c
index ca9e2a3545a9..697dc34b7b87 100644
--- a/trunk/arch/x86/kernel/apic/io_apic.c
+++ b/trunk/arch/x86/kernel/apic/io_apic.c
@@ -4002,9 +4002,6 @@ int mp_find_ioapic(u32 gsi)
{
int i = 0;
- if (nr_ioapics == 0)
- return -1;
-
/* Find the IOAPIC that manages this GSI. */
for (i = 0; i < nr_ioapics; i++) {
if ((gsi >= mp_gsi_routing[i].gsi_base)
diff --git a/trunk/arch/x86/kernel/check.c b/trunk/arch/x86/kernel/check.c
index 452932d34730..13a389179514 100644
--- a/trunk/arch/x86/kernel/check.c
+++ b/trunk/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
addr += size;
}
- if (num_scan_areas)
- printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
+ printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
+ num_scan_areas);
}
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
{
check_for_bios_corruption();
schedule_delayed_work(&bios_check_work,
- round_jiffies_relative(corruption_check_period*HZ));
+ round_jiffies_relative(corruption_check_period*HZ));
}
static int start_periodic_check_for_corruption(void)
{
- if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
+ if (!memory_corruption_check || corruption_check_period == 0)
return 0;
printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 52c93648e492..bd1cac747f67 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
- printk_once(KERN_WARNING PFX "Warning: EST-capable "
- "CPU detected. The acpi-cpufreq module offers "
- "voltage scaling in addition to frequency "
+ printk(KERN_WARNING PFX "Warning: EST-capable CPU "
+ "detected. The acpi-cpufreq module offers "
+ "voltage scaling in addition of frequency "
"scaling. You should use that instead of "
"p4-clockmod, if possible.\n");
switch (c->x86_model) {
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4a5a42b842ad..4f6f679f2799 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
cmd_incomplete:
iowrite16(0, &pcch_hdr->status);
spin_unlock(&pcc_lock);
- return 0;
+ return -EINVAL;
}
static int pcc_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c567dec854f6..35c7e65e59be 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,7 +1537,6 @@ static struct notifier_block cpb_nb = {
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0, cpu;
- int rv;
for_each_online_cpu(i) {
int rc;
@@ -1556,14 +1555,14 @@ static int __cpuinit powernowk8_init(void)
cpb_capable = true;
+ register_cpu_notifier(&cpb_nb);
+
msrs = msrs_alloc();
if (!msrs) {
printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
return -ENOMEM;
}
- register_cpu_notifier(&cpb_nb);
-
rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
for_each_cpu(cpu, cpu_online_mask) {
@@ -1575,13 +1574,7 @@ static int __cpuinit powernowk8_init(void)
(cpb_enabled ? "on" : "off"));
}
- rv = cpufreq_register_driver(&cpufreq_amd64_driver);
- if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
- unregister_cpu_notifier(&cpb_nb);
- msrs_free(msrs);
- msrs = NULL;
- }
- return rv;
+ return cpufreq_register_driver(&cpufreq_amd64_driver);
}
/* driver entry point for term */
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index 26604188aa49..9d977a2ea693 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -30,7 +30,6 @@
#include
#include
#include
-#include
#if 0
#undef wrmsrl
@@ -94,8 +93,6 @@ struct amd_nb {
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
-struct intel_percore;
-
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
@@ -130,13 +127,6 @@ struct cpu_hw_events {
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
- /*
- * Intel percore register state.
- * Coordinate shared resources between HT threads.
- */
- int percore_used; /* Used by this CPU? */
- struct intel_percore *per_core;
-
/*
* AMD specific bits
*/
@@ -176,10 +166,8 @@ struct cpu_hw_events {
/*
* Constraint on the Event code + UMask
*/
-#define INTEL_UEVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define PEBS_EVENT_CONSTRAINT(c, n) \
- INTEL_UEVENT_CONSTRAINT(c, n)
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
@@ -187,28 +175,6 @@ struct cpu_hw_events {
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
-/*
- * Extra registers for specific events.
- * Some events need large masks and require external MSRs.
- * Define a mapping to these extra registers.
- */
-struct extra_reg {
- unsigned int event;
- unsigned int msr;
- u64 config_mask;
- u64 valid_mask;
-};
-
-#define EVENT_EXTRA_REG(e, ms, m, vm) { \
- .event = (e), \
- .msr = (ms), \
- .config_mask = (m), \
- .valid_mask = (vm), \
- }
-#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
- EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
-#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
-
union perf_capabilities {
struct {
u64 lbr_format : 6;
@@ -253,7 +219,6 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
- struct event_constraint *percore_constraints;
void (*quirks)(void);
int perfctr_second_write;
@@ -282,11 +247,6 @@ struct x86_pmu {
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
-
- /*
- * Extra registers for events
- */
- struct extra_reg *extra_regs;
};
static struct x86_pmu x86_pmu __read_mostly;
@@ -311,10 +271,6 @@ static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
-static u64 __read_mostly hw_cache_extra_regs
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
/*
* Propagate event elapsed time into the generic event.
@@ -342,7 +298,7 @@ x86_perf_event_update(struct perf_event *event)
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
- rdmsrl(hwc->event_base, new_raw_count);
+ rdmsrl(hwc->event_base + idx, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -365,49 +321,6 @@ x86_perf_event_update(struct perf_event *event)
return new_raw_count;
}
-/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */
-static inline int x86_pmu_addr_offset(int index)
-{
- if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
- return index << 1;
- return index;
-}
-
-static inline unsigned int x86_pmu_config_addr(int index)
-{
- return x86_pmu.eventsel + x86_pmu_addr_offset(index);
-}
-
-static inline unsigned int x86_pmu_event_addr(int index)
-{
- return x86_pmu.perfctr + x86_pmu_addr_offset(index);
-}
-
-/*
- * Find and validate any extra registers to set up.
- */
-static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
-{
- struct extra_reg *er;
-
- event->hw.extra_reg = 0;
- event->hw.extra_config = 0;
-
- if (!x86_pmu.extra_regs)
- return 0;
-
- for (er = x86_pmu.extra_regs; er->msr; er++) {
- if (er->event != (config & er->config_mask))
- continue;
- if (event->attr.config1 & ~er->valid_mask)
- return -EINVAL;
- event->hw.extra_reg = er->msr;
- event->hw.extra_config = event->attr.config1;
- break;
- }
- return 0;
-}
-
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
@@ -418,12 +331,12 @@ static bool reserve_pmc_hardware(void)
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
+ if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
for (i = 0; i < x86_pmu.num_counters; i++) {
- if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
+ if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
@@ -431,13 +344,13 @@ static bool reserve_pmc_hardware(void)
eventsel_fail:
for (i--; i >= 0; i--)
- release_evntsel_nmi(x86_pmu_config_addr(i));
+ release_evntsel_nmi(x86_pmu.eventsel + i);
i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
- release_perfctr_nmi(x86_pmu_event_addr(i));
+ release_perfctr_nmi(x86_pmu.perfctr + i);
return false;
}
@@ -447,8 +360,8 @@ static void release_pmc_hardware(void)
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
- release_perfctr_nmi(x86_pmu_event_addr(i));
- release_evntsel_nmi(x86_pmu_config_addr(i));
+ release_perfctr_nmi(x86_pmu.perfctr + i);
+ release_evntsel_nmi(x86_pmu.eventsel + i);
}
}
@@ -469,7 +382,7 @@ static bool check_hw_exists(void)
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
- reg = x86_pmu_config_addr(i);
+ reg = x86_pmu.eventsel + i;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
@@ -494,8 +407,8 @@ static bool check_hw_exists(void)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
- ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
- ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
+ ret = checking_wrmsrl(x86_pmu.perfctr, val);
+ ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
if (ret || val != val_new)
goto msr_fail;
@@ -529,9 +442,8 @@ static inline int x86_pmu_initialized(void)
}
static inline int
-set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
- struct perf_event_attr *attr = &event->attr;
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
@@ -558,8 +470,8 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return -EINVAL;
hwc->config |= val;
- attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
- return x86_pmu_extra_regs(val, event);
+
+ return 0;
}
static int x86_setup_perfctr(struct perf_event *event)
@@ -584,10 +496,10 @@ static int x86_setup_perfctr(struct perf_event *event)
}
if (attr->type == PERF_TYPE_RAW)
- return x86_pmu_extra_regs(event->attr.config, event);
+ return 0;
if (attr->type == PERF_TYPE_HW_CACHE)
- return set_ext_hw_attr(hwc, event);
+ return set_ext_hw_attr(hwc, attr);
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
@@ -705,11 +617,11 @@ static void x86_pmu_disable_all(void)
if (!test_bit(idx, cpuc->active_mask))
continue;
- rdmsrl(x86_pmu_config_addr(idx), val);
+ rdmsrl(x86_pmu.eventsel + idx, val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
- wrmsrl(x86_pmu_config_addr(idx), val);
+ wrmsrl(x86_pmu.eventsel + idx, val);
}
}
@@ -730,26 +642,21 @@ static void x86_pmu_disable(struct pmu *pmu)
x86_pmu.disable_all();
}
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
- u64 enable_mask)
-{
- if (hwc->extra_reg)
- wrmsrl(hwc->extra_reg, hwc->extra_config);
- wrmsrl(hwc->config_base, hwc->config | enable_mask);
-}
-
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
+ struct perf_event *event = cpuc->events[idx];
+ u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+ val = event->hw.config;
+ val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+ wrmsrl(x86_pmu.eventsel + idx, val);
}
}
@@ -914,10 +821,15 @@ static inline void x86_assign_hw_event(struct perf_event *event,
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
+ /*
+ * We set it so that event_base + idx in wrmsr/rdmsr maps to
+ * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
+ */
+ hwc->event_base =
+ MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
} else {
- hwc->config_base = x86_pmu_config_addr(hwc->idx);
- hwc->event_base = x86_pmu_event_addr(hwc->idx);
+ hwc->config_base = x86_pmu.eventsel;
+ hwc->event_base = x86_pmu.perfctr;
}
}
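The bias applied to event_base in the fixed-counter branch above is easiest to see with concrete numbers (a worked sketch; MSR_ARCH_PERFMON_FIXED_CTR0 = 0x309 and X86_PMC_IDX_FIXED = 32 are the customary values and are assumed here rather than taken from this patch):

/* Worked example of the event_base bias above -- illustration only. */
#include <assert.h>
#include <stdio.h>

#define MSR_ARCH_PERFMON_FIXED_CTR0     0x309   /* assumed, customary value */
#define X86_PMC_IDX_FIXED               32      /* assumed, customary value */

int main(void)
{
        unsigned int event_base =
                MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;  /* 0x2e9 */

        /* rdmsrl(hwc->event_base + idx, ...) then hits the right MSR: */
        assert(event_base + 32 == 0x309);       /* idx 32 -> FIXED_CTR0 */
        assert(event_base + 33 == 0x30a);       /* idx 33 -> FIXED_CTR1 */
        assert(event_base + 34 == 0x30b);       /* idx 34 -> FIXED_CTR2 */

        printf("fixed-counter event_base = %#x\n", event_base);
        return 0;
}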
@@ -1003,11 +915,17 @@ static void x86_pmu_enable(struct pmu *pmu)
x86_pmu.enable_all(added);
}
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+ u64 enable_mask)
+{
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
+}
+
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -1060,7 +978,7 @@ x86_perf_event_set_period(struct perf_event *event)
*/
local64_set(&hwc->prev_count, (u64)-left);
- wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+ wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
/*
* Due to erratum on certain CPUs we need
@@ -1068,7 +986,7 @@ x86_perf_event_set_period(struct perf_event *event)
* is updated properly
*/
if (x86_pmu.perfctr_second_write) {
- wrmsrl(hwc->event_base,
+ wrmsrl(hwc->event_base + idx,
(u64)(-left) & x86_pmu.cntval_mask);
}
@@ -1195,8 +1113,8 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
- rdmsrl(x86_pmu_event_addr(idx), pmc_count);
+ rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+ rdmsrl(x86_pmu.perfctr + idx, pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
@@ -1471,7 +1389,7 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n");
}
-static int __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
{
struct event_constraint *c;
int err;
@@ -1690,7 +1608,7 @@ static int validate_group(struct perf_event *event)
return ret;
}
-static int x86_pmu_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
{
struct pmu *tmp;
int err;
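For context on the extra-register machinery removed above: x86_pmu_extra_regs() copied attr.config1 into hw.extra_config so that __x86_pmu_enable_event() could write it to the mapped OFFCORE_RSP MSR. A userspace sketch of how such an event would have been requested (not part of the patch; the raw encoding 0x01b7 and the DMND_DATA_RD/RESP_UNCORE_HIT bits come from the tables elsewhere in this patch, everything else is illustrative):

/*
 * Userspace sketch only.  Shows attr.config1 carrying the OFFCORE_RSP
 * mask that the removed x86_pmu_extra_regs() code would have programmed.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size     = sizeof(attr);
        attr.type     = PERF_TYPE_RAW;
        attr.config   = 0x01b7;              /* OFFCORE_RESPONSE_0 event/umask */
        attr.config1  = (1 << 0) | (1 << 8); /* DMND_DATA_RD | RESP_UNCORE_HIT */
        attr.disabled = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                perror("perf_event_open");
        else
                close(fd);
        return 0;
}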
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd.c b/trunk/arch/x86/kernel/cpu/perf_event_amd.c
index 461f62bbd774..67e2202a6039 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_amd.c
@@ -127,11 +127,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
/*
* AMD64 events are detected based on their event codes.
*/
-static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
-{
- return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
-}
-
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
return (hwc->config & 0xe0) == 0xe0;
@@ -390,181 +385,13 @@ static __initconst const struct x86_pmu amd_pmu = {
.cpu_dead = amd_pmu_cpu_dead,
};
-/* AMD Family 15h */
-
-#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
-
-#define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
-#define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
-#define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
-#define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
-#define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
-#define AMD_EVENT_EX_LS 0x000000C0ULL
-#define AMD_EVENT_DE 0x000000D0ULL
-#define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
-
-/*
- * AMD family 15h event code/PMC mappings:
- *
- * type = event_code & 0x0F0:
- *
- * 0x000 FP PERF_CTL[5:3]
- * 0x010 FP PERF_CTL[5:3]
- * 0x020 LS PERF_CTL[5:0]
- * 0x030 LS PERF_CTL[5:0]
- * 0x040 DC PERF_CTL[5:0]
- * 0x050 DC PERF_CTL[5:0]
- * 0x060 CU PERF_CTL[2:0]
- * 0x070 CU PERF_CTL[2:0]
- * 0x080 IC/DE PERF_CTL[2:0]
- * 0x090 IC/DE PERF_CTL[2:0]
- * 0x0A0 ---
- * 0x0B0 ---
- * 0x0C0 EX/LS PERF_CTL[5:0]
- * 0x0D0 DE PERF_CTL[2:0]
- * 0x0E0 NB NB_PERF_CTL[3:0]
- * 0x0F0 NB NB_PERF_CTL[3:0]
- *
- * Exceptions:
- *
- * 0x003 FP PERF_CTL[3]
- * 0x00B FP PERF_CTL[3]
- * 0x00D FP PERF_CTL[3]
- * 0x023 DE PERF_CTL[2:0]
- * 0x02D LS PERF_CTL[3]
- * 0x02E LS PERF_CTL[3,0]
- * 0x043 CU PERF_CTL[2:0]
- * 0x045 CU PERF_CTL[2:0]
- * 0x046 CU PERF_CTL[2:0]
- * 0x054 CU PERF_CTL[2:0]
- * 0x055 CU PERF_CTL[2:0]
- * 0x08F IC PERF_CTL[0]
- * 0x187 DE PERF_CTL[0]
- * 0x188 DE PERF_CTL[0]
- * 0x0DB EX PERF_CTL[5:0]
- * 0x0DC LS PERF_CTL[5:0]
- * 0x0DD LS PERF_CTL[5:0]
- * 0x0DE LS PERF_CTL[5:0]
- * 0x0DF LS PERF_CTL[5:0]
- * 0x1D6 EX PERF_CTL[5:0]
- * 0x1D8 EX PERF_CTL[5:0]
- */
-
-static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
-static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
-static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
-static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
-static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
-
-static struct event_constraint *
-amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- unsigned int event_code = amd_get_event_code(&event->hw);
-
- switch (event_code & AMD_EVENT_TYPE_MASK) {
- case AMD_EVENT_FP:
- switch (event_code) {
- case 0x003:
- case 0x00B:
- case 0x00D:
- return &amd_f15_PMC3;
- default:
- return &amd_f15_PMC53;
- }
- case AMD_EVENT_LS:
- case AMD_EVENT_DC:
- case AMD_EVENT_EX_LS:
- switch (event_code) {
- case 0x023:
- case 0x043:
- case 0x045:
- case 0x046:
- case 0x054:
- case 0x055:
- return &amd_f15_PMC20;
- case 0x02D:
- return &amd_f15_PMC3;
- case 0x02E:
- return &amd_f15_PMC30;
- default:
- return &amd_f15_PMC50;
- }
- case AMD_EVENT_CU:
- case AMD_EVENT_IC_DE:
- case AMD_EVENT_DE:
- switch (event_code) {
- case 0x08F:
- case 0x187:
- case 0x188:
- return &amd_f15_PMC0;
- case 0x0DB ... 0x0DF:
- case 0x1D6:
- case 0x1D8:
- return &amd_f15_PMC50;
- default:
- return &amd_f15_PMC20;
- }
- case AMD_EVENT_NB:
- /* not yet implemented */
- return &emptyconstraint;
- default:
- return &emptyconstraint;
- }
-}
-
-static __initconst const struct x86_pmu amd_pmu_f15h = {
- .name = "AMD Family 15h",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = x86_pmu_enable_all,
- .enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = amd_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_F15H_PERF_CTL,
- .perfctr = MSR_F15H_PERF_CTR,
- .event_map = amd_pmu_event_map,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = 6,
- .cntval_bits = 48,
- .cntval_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints_f15h,
- /* northbridge counters not yet implemented: */
-#if 0
- .put_event_constraints = amd_put_event_constraints,
-
- .cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_starting = amd_pmu_cpu_starting,
- .cpu_dead = amd_pmu_cpu_dead,
-#endif
-};
-
static __init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
return -ENODEV;
- /*
- * If core performance counter extensions exist, it must be
- * family 15h, otherwise fail. See x86_pmu_addr_offset().
- */
- switch (boot_cpu_data.x86) {
- case 0x15:
- if (!cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu_f15h;
- break;
- default:
- if (cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu;
- break;
- }
+ x86_pmu = amd_pmu;
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
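A worked example of the event-code extraction that drives the Family 15h constraint table removed above (illustration only; amd_get_event_code() is reproduced from the hunk, and the sample events come from the mapping comment):

/*
 * Worked example (illustration only) of the event-code extraction used
 * by the Family 15h constraint code removed above.
 */
#include <stdio.h>

static unsigned int amd_get_event_code(unsigned long long config)
{
        /* event select lives in bits 35:32 and 7:0 of the PERF_CTL layout */
        return ((config >> 24) & 0x0f00) | (config & 0x00ff);
}

int main(void)
{
        unsigned long long cfg_c0  = 0x0c0;               /* retired instructions */
        unsigned long long cfg_1d6 = (1ULL << 32) | 0xd6; /* extended event 0x1d6 */

        /* type 0x0c0 -> EX/LS group -> PERF_CTL[5:0] in the table above */
        printf("code %#x, type %#x\n",
               amd_get_event_code(cfg_c0), amd_get_event_code(cfg_c0) & 0x0f0);

        /* 0x1d6 is one of the listed exceptions -> PERF_CTL[5:0] */
        printf("code %#x\n", amd_get_event_code(cfg_1d6));
        return 0;
}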
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index 8fc2b2cee1da..008835c1d79c 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,27 +1,5 @@
#ifdef CONFIG_CPU_SUP_INTEL
-#define MAX_EXTRA_REGS 2
-
-/*
- * Per register state.
- */
-struct er_account {
- int ref; /* reference count */
- unsigned int extra_reg; /* extra MSR number */
- u64 extra_config; /* extra MSR config */
-};
-
-/*
- * Per core state
- * This used to coordinate shared registers for HT threads.
- */
-struct intel_percore {
- raw_spinlock_t lock; /* protect structure */
- struct er_account regs[MAX_EXTRA_REGS];
- int refcnt; /* number of threads */
- unsigned core_id;
-};
-
/*
* Intel PerfMon, used on Core and later.
*/
@@ -86,18 +64,6 @@ static struct event_constraint intel_nehalem_event_constraints[] =
EVENT_CONSTRAINT_END
};
-static struct extra_reg intel_nehalem_extra_regs[] =
-{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
- EVENT_EXTRA_END
-};
-
-static struct event_constraint intel_nehalem_percore_constraints[] =
-{
- INTEL_EVENT_CONSTRAINT(0xb7, 0),
- EVENT_CONSTRAINT_END
-};
-
static struct event_constraint intel_westmere_event_constraints[] =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -110,33 +76,6 @@ static struct event_constraint intel_westmere_event_constraints[] =
EVENT_CONSTRAINT_END
};
-static struct event_constraint intel_snb_event_constraints[] =
-{
- FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
- FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
- INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
- INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
- INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
- INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
- EVENT_CONSTRAINT_END
-};
-
-static struct extra_reg intel_westmere_extra_regs[] =
-{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
- EVENT_EXTRA_END
-};
-
-static struct event_constraint intel_westmere_percore_constraints[] =
-{
- INTEL_EVENT_CONSTRAINT(0xb7, 0),
- INTEL_EVENT_CONSTRAINT(0xbb, 0),
- EVENT_CONSTRAINT_END
-};
-
static struct event_constraint intel_gen_event_constraints[] =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
@@ -150,106 +89,6 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}
-static __initconst const u64 snb_hw_cache_event_ids
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
- [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
- [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
- },
- },
- [ C(L1I ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(LL ) ] = {
- /*
- * TBD: Need Off-core Response Performance Monitoring support
- */
- [ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01bb,
- },
- [ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01bb,
- },
- [ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01bb,
- },
- },
- [ C(DTLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
- [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
- [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0x0,
- [ C(RESULT_MISS) ] = 0x0,
- },
- },
- [ C(ITLB) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
- [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
- [ C(BPU ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
- [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
- },
- },
-};
-
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -285,26 +124,16 @@ static __initconst const u64 westmere_hw_cache_event_ids
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01bb,
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
},
- /*
- * Use RFO, not WRITEBACK, because a write miss would typically occur
- * on RFO.
- */
[ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01bb,
- /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
},
[ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01bb,
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
},
},
[ C(DTLB) ] = {
@@ -351,39 +180,6 @@ static __initconst const u64 westmere_hw_cache_event_ids
},
};
-/*
- * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3
- */
-
-#define DMND_DATA_RD (1 << 0)
-#define DMND_RFO (1 << 1)
-#define DMND_WB (1 << 3)
-#define PF_DATA_RD (1 << 4)
-#define PF_DATA_RFO (1 << 5)
-#define RESP_UNCORE_HIT (1 << 8)
-#define RESP_MISS (0xf600) /* non uncore hit */
-
-static __initconst const u64 nehalem_hw_cache_extra_regs
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(LL ) ] = {
- [ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
- [ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS,
- },
- [ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
- [ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS,
- },
- [ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
- [ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
- },
- }
-};
-
static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -419,26 +215,16 @@ static __initconst const u64 nehalem_hw_cache_event_ids
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
- /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
+ [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
+ [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
},
- /*
- * Use RFO, not WRITEBACK, because a write miss would typically occur
- * on RFO.
- */
[ C(OP_WRITE) ] = {
- /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
+ [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
+ [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
},
[ C(OP_PREFETCH) ] = {
- /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
- [ C(RESULT_ACCESS) ] = 0x01b7,
- /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
- [ C(RESULT_MISS) ] = 0x01b7,
+ [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
+ [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
},
},
[ C(DTLB) ] = {
@@ -905,8 +691,8 @@ static void intel_pmu_reset(void)
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
- checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
+ checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
+ checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
@@ -1007,67 +793,6 @@ intel_bts_constraints(struct perf_event *event)
return NULL;
}
-static struct event_constraint *
-intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
- struct event_constraint *c;
- struct intel_percore *pc;
- struct er_account *era;
- int i;
- int free_slot;
- int found;
-
- if (!x86_pmu.percore_constraints || hwc->extra_alloc)
- return NULL;
-
- for (c = x86_pmu.percore_constraints; c->cmask; c++) {
- if (e != c->code)
- continue;
-
- /*
- * Allocate resource per core.
- */
- pc = cpuc->per_core;
- if (!pc)
- break;
- c = &emptyconstraint;
- raw_spin_lock(&pc->lock);
- free_slot = -1;
- found = 0;
- for (i = 0; i < MAX_EXTRA_REGS; i++) {
- era = &pc->regs[i];
- if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
- /* Allow sharing same config */
- if (hwc->extra_config == era->extra_config) {
- era->ref++;
- cpuc->percore_used = 1;
- hwc->extra_alloc = 1;
- c = NULL;
- }
- /* else conflict */
- found = 1;
- break;
- } else if (era->ref == 0 && free_slot == -1)
- free_slot = i;
- }
- if (!found && free_slot != -1) {
- era = &pc->regs[free_slot];
- era->ref = 1;
- era->extra_reg = hwc->extra_reg;
- era->extra_config = hwc->extra_config;
- cpuc->percore_used = 1;
- hwc->extra_alloc = 1;
- c = NULL;
- }
- raw_spin_unlock(&pc->lock);
- return c;
- }
-
- return NULL;
-}
-
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
@@ -1081,51 +806,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
- c = intel_percore_constraints(cpuc, event);
- if (c)
- return c;
-
return x86_get_event_constraints(cpuc, event);
}
-static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct extra_reg *er;
- struct intel_percore *pc;
- struct er_account *era;
- struct hw_perf_event *hwc = &event->hw;
- int i, allref;
-
- if (!cpuc->percore_used)
- return;
-
- for (er = x86_pmu.extra_regs; er->msr; er++) {
- if (er->event != (hwc->config & er->config_mask))
- continue;
-
- pc = cpuc->per_core;
- raw_spin_lock(&pc->lock);
- for (i = 0; i < MAX_EXTRA_REGS; i++) {
- era = &pc->regs[i];
- if (era->ref > 0 &&
- era->extra_config == hwc->extra_config &&
- era->extra_reg == er->msr) {
- era->ref--;
- hwc->extra_alloc = 0;
- break;
- }
- }
- allref = 0;
- for (i = 0; i < MAX_EXTRA_REGS; i++)
- allref += pc->regs[i].ref;
- if (allref == 0)
- cpuc->percore_used = 0;
- raw_spin_unlock(&pc->lock);
- break;
- }
-}
-
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -1197,67 +880,20 @@ static __initconst const struct x86_pmu core_pmu = {
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
};
-static int intel_pmu_cpu_prepare(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-
- if (!cpu_has_ht_siblings())
- return NOTIFY_OK;
-
- cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
- GFP_KERNEL, cpu_to_node(cpu));
- if (!cpuc->per_core)
- return NOTIFY_BAD;
-
- raw_spin_lock_init(&cpuc->per_core->lock);
- cpuc->per_core->core_id = -1;
- return NOTIFY_OK;
-}
-
static void intel_pmu_cpu_starting(int cpu)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- int core_id = topology_core_id(cpu);
- int i;
-
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
*/
intel_pmu_lbr_reset();
-
- if (!cpu_has_ht_siblings())
- return;
-
- for_each_cpu(i, topology_thread_cpumask(cpu)) {
- struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
-
- if (pc && pc->core_id == core_id) {
- kfree(cpuc->per_core);
- cpuc->per_core = pc;
- break;
- }
- }
-
- cpuc->per_core->core_id = core_id;
- cpuc->per_core->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- struct intel_percore *pc = cpuc->per_core;
-
- if (pc) {
- if (pc->core_id == -1 || --pc->refcnt == 0)
- kfree(pc);
- cpuc->per_core = NULL;
- }
-
fini_debug_store_on_cpu(cpu);
}
@@ -1282,9 +918,7 @@ static __initconst const struct x86_pmu intel_pmu = {
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
- .cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
};
@@ -1390,7 +1024,6 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_core();
x86_pmu.event_constraints = intel_core2_event_constraints;
- x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
break;
@@ -1399,16 +1032,11 @@ static __init int intel_pmu_init(void)
case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
- sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_nehalem_event_constraints;
- x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
- x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
- x86_pmu.extra_regs = intel_nehalem_extra_regs;
pr_cont("Nehalem events, ");
break;
@@ -1419,7 +1047,6 @@ static __init int intel_pmu_init(void)
intel_pmu_lbr_init_atom();
x86_pmu.event_constraints = intel_gen_event_constraints;
- x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
pr_cont("Atom events, ");
break;
@@ -1427,30 +1054,14 @@ static __init int intel_pmu_init(void)
case 44: /* 32 nm nehalem, "Gulftown" */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
- sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_westmere_event_constraints;
- x86_pmu.percore_constraints = intel_westmere_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
- x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
- x86_pmu.extra_regs = intel_westmere_extra_regs;
pr_cont("Westmere events, ");
break;
- case 42: /* SandyBridge */
- memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
-
- intel_pmu_lbr_init_nhm();
-
- x86_pmu.event_constraints = intel_snb_event_constraints;
- x86_pmu.pebs_constraints = intel_snb_pebs_events;
- pr_cont("SandyBridge events, ");
- break;
-
default:
/*
* default constraints for v2 and up
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b95c66ae4a2a..b7dcd9f2b8a0 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -361,88 +361,30 @@ static int intel_pmu_drain_bts_buffer(void)
/*
* PEBS
*/
-static struct event_constraint intel_core2_pebs_event_constraints[] = {
- PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+
+static struct event_constraint intel_core_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_atom_pebs_event_constraints[] = {
- PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
EVENT_CONSTRAINT_END
};
-static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_westmere_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
-
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_snb_pebs_events[] = {
- PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
- PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */
- PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */
- PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */
- PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */
- PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */
- PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
- PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
- PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
- PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */
- PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
- PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */
- PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
- PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
- PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
- PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
- PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
- PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
- PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
- PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
- PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
- PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
- PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */
- PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
- PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
- PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
- PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */
- PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */
- PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
+static struct event_constraint intel_nehalem_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
+ PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
EVENT_CONSTRAINT_END
};
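For reference while reading the PEBS tables above: the first argument of PEBS_EVENT_CONSTRAINT() packs the umask (bits 15:8) and event code (bits 7:0), and the second is a bitmask of the general-purpose counters the event may be scheduled on. A small decoding sketch (illustration only, not part of the patch):

/* Illustration only: decoding the PEBS_EVENT_CONSTRAINT() arguments. */
#include <stdio.h>

int main(void)
{
        unsigned int code       = 0x01cb; /* MEM_LOAD_RETIRED.L1D_MISS */
        unsigned int cmask_core = 0x1;    /* Core2 table: counter 0 only */
        unsigned int cmask_nhm  = 0xf;    /* Nehalem table: counters 0-3 */

        printf("event=%#x umask=%#x\n", code & 0xff, (code >> 8) & 0xff);
        printf("core2 counter mask %#x, nehalem counter mask %#x\n",
               cmask_core, cmask_nhm);
        return 0;
}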
@@ -753,17 +695,20 @@ static void intel_ds_init(void)
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ x86_pmu.pebs_constraints = intel_core_pebs_events;
break;
case 1:
printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+ x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
break;
default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
+ break;
}
}
}
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
index 3769ac822f96..f7a0993c1e7c 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
@@ -764,20 +764,15 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
u64 v;
/* an official way for overflow indication */
- rdmsrl(hwc->config_base, v);
+ rdmsrl(hwc->config_base + hwc->idx, v);
if (v & P4_CCCR_OVF) {
- wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
+ wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
return 1;
}
- /*
- * In some circumstances the overflow might issue an NMI but did
- * not set P4_CCCR_OVF bit. Because a counter holds a negative value
- * we simply check for high bit being set, if it's cleared it means
- * the counter has reached zero value and continued counting before
- * real NMI signal was received:
- */
- if (!(v & ARCH_P4_UNFLAGGED_BIT))
+ /* it might be unflagged overflow */
+ rdmsrl(hwc->event_base + hwc->idx, v);
+ if (!(v & ARCH_P4_CNTRVAL_MASK))
return 1;
return 0;
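The removed comment above relies on the usual sampling trick: counters are armed with the negative of the sampling period, so the high (sign) bit stays set until the counter crosses zero at overflow time. A standalone sketch (assumes a 40-bit counter, the customary P4 width; not part of the patch):

/*
 * Illustration only: why a cleared sign bit means the counter already
 * wrapped.  Counters are programmed to -(sampling period) so they
 * overflow, and raise the PMI, after exactly `period` events.
 */
#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS     40                      /* assumed width, P4-style */
#define CNTVAL_MASK     ((1ULL << CNTVAL_BITS) - 1)
#define SIGN_BIT        (1ULL << (CNTVAL_BITS - 1))

int main(void)
{
        uint64_t period  = 100000;
        uint64_t counter = (uint64_t)(-(int64_t)period) & CNTVAL_MASK;

        printf("armed:   %#llx, sign set: %d\n",
               (unsigned long long)counter, !!(counter & SIGN_BIT));

        counter = (counter + period) & CNTVAL_MASK;     /* `period` events later */
        printf("wrapped: %#llx, sign set: %d\n",
               (unsigned long long)counter, !!(counter & SIGN_BIT));
        return 0;
}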
@@ -815,7 +810,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
- (void)checking_wrmsrl(hwc->config_base,
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -885,7 +880,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
p4_pmu_enable_pebs(hwc->config);
(void)checking_wrmsrl(escr_addr, escr_conf);
- (void)checking_wrmsrl(hwc->config_base,
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p6.c b/trunk/arch/x86/kernel/cpu/perf_event_p6.c
index 20c097e33860..34ba07be2cda 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_p6.c
@@ -68,7 +68,7 @@ p6_pmu_disable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
@@ -81,7 +81,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
static __initconst const struct x86_pmu p6_pmu = {
diff --git a/trunk/arch/x86/kernel/cpu/perfctr-watchdog.c b/trunk/arch/x86/kernel/cpu/perfctr-watchdog.c
index 966512b2cacf..d5a236615501 100644
--- a/trunk/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/trunk/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -46,8 +46,6 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
/* returns the bit offset of the performance counter register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- if (msr >= MSR_F15H_PERF_CTR)
- return (msr - MSR_F15H_PERF_CTR) >> 1;
return msr - MSR_K7_PERFCTR0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
@@ -72,8 +70,6 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* returns the bit offset of the event selection register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- if (msr >= MSR_F15H_PERF_CTL)
- return (msr - MSR_F15H_PERF_CTL) >> 1;
return msr - MSR_K7_EVNTSEL0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
diff --git a/trunk/arch/x86/kernel/dumpstack.c b/trunk/arch/x86/kernel/dumpstack.c
index 220a1c11cfde..df20723a6a1b 100644
--- a/trunk/arch/x86/kernel/dumpstack.c
+++ b/trunk/arch/x86/kernel/dumpstack.c
@@ -320,6 +320,31 @@ void die(const char *str, struct pt_regs *regs, long err)
oops_end(flags, regs, sig);
}
+void notrace __kprobes
+die_nmi(char *str, struct pt_regs *regs, int do_panic)
+{
+ unsigned long flags;
+
+ if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+ return;
+
+ /*
+	 * We are in trouble anyway, let's at least try
+ * to get a message out.
+ */
+ flags = oops_begin();
+ printk(KERN_EMERG "%s", str);
+ printk(" on CPU%d, ip %08lx, registers:\n",
+ smp_processor_id(), regs->ip);
+ show_registers(regs);
+ oops_end(flags, regs, 0);
+ if (do_panic || panic_on_oops)
+ panic("Non maskable interrupt");
+ nmi_exit();
+ local_irq_enable();
+ do_exit(SIGBUS);
+}
+
static int __init oops_setup(char *s)
{
if (!s)
diff --git a/trunk/arch/x86/kernel/early-quirks.c b/trunk/arch/x86/kernel/early-quirks.c
index 9efbdcc56425..76b8cd953dee 100644
--- a/trunk/arch/x86/kernel/early-quirks.c
+++ b/trunk/arch/x86/kernel/early-quirks.c
@@ -143,10 +143,15 @@ static void __init ati_bugs(int num, int slot, int func)
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
- u32 d;
+ u32 old, d;
+ d = read_pci_config(num, slot, func, 0x70);
+ old = d;
+ d &= ~(1<<8);
+ write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
+ write_pci_config(num, slot, func, 0x70, old);
return d;
}
@@ -155,14 +160,11 @@ static void __init ati_bugs_contd(int num, int slot, int func)
{
u32 d, rev;
- rev = ati_sbx00_rev(num, slot, func);
- if (rev >= 0x40)
- acpi_fix_pin2_polarity = 1;
-
- if (rev > 0x13)
+ if (acpi_use_timer_override)
return;
- if (acpi_use_timer_override)
+ rev = ati_sbx00_rev(num, slot, func);
+ if (rev > 0x13)
return;
/* check for IRQ0 interrupt swap */
diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S
index f5accf8eaa78..c8b4efad7ebb 100644
--- a/trunk/arch/x86/kernel/entry_32.S
+++ b/trunk/arch/x86/kernel/entry_32.S
@@ -65,8 +65,6 @@
#define sysexit_audit syscall_exit_work
#endif
- .section .entry.text, "ax"
-
/*
* We use macros for low-level operations which need to be overridden
* for paravirtualization. The following will never clobber any registers:
@@ -790,7 +788,7 @@ ENDPROC(ptregs_clone)
*/
.section .init.rodata,"a"
ENTRY(interrupt)
-.section .entry.text, "ax"
+.text
.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
@@ -809,7 +807,7 @@ vector=FIRST_EXTERNAL_VECTOR
.endif
.previous
.long 1b
- .section .entry.text, "ax"
+ .text
vector=vector+1
.endif
.endr
diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S
index 0a0ed794edb2..aed1ffbeb0c9 100644
--- a/trunk/arch/x86/kernel/entry_64.S
+++ b/trunk/arch/x86/kernel/entry_64.S
@@ -61,8 +61,6 @@
#define __AUDIT_ARCH_LE 0x40000000
.code64
- .section .entry.text, "ax"
-
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
@@ -746,7 +744,7 @@ END(stub_rt_sigreturn)
*/
.section .init.rodata,"a"
ENTRY(interrupt)
- .section .entry.text
+ .text
.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
@@ -765,7 +763,7 @@ vector=FIRST_EXTERNAL_VECTOR
.endif
.previous
.quad 1b
- .section .entry.text
+ .text
vector=vector+1
.endif
.endr
diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c
index a93742a57468..382eb2936d4d 100644
--- a/trunk/arch/x86/kernel/ftrace.c
+++ b/trunk/arch/x86/kernel/ftrace.c
@@ -437,19 +437,18 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
return;
}
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY) {
*parent = old;
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer) == -EBUSY) {
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
*parent = old;
- return;
}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c
index 387b6a0c9e81..52945da52a94 100644
--- a/trunk/arch/x86/kernel/irq.c
+++ b/trunk/arch/x86/kernel/irq.c
@@ -367,8 +367,7 @@ void fixup_irqs(void)
if (irr & (1 << (vector % 32))) {
irq = __this_cpu_read(vector_irq[vector]);
- desc = irq_to_desc(irq);
- data = &desc->irq_data;
+ data = irq_get_irq_data(irq);
raw_spin_lock(&desc->lock);
if (data->chip->irq_retrigger)
data->chip->irq_retrigger(data);
diff --git a/trunk/arch/x86/kernel/kgdb.c b/trunk/arch/x86/kernel/kgdb.c
index 7c64c420a9f6..a4130005028a 100644
--- a/trunk/arch/x86/kernel/kgdb.c
+++ b/trunk/arch/x86/kernel/kgdb.c
@@ -533,6 +533,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
}
return NOTIFY_DONE;
+ case DIE_NMIWATCHDOG:
+ if (atomic_read(&kgdb_active) != -1) {
+ /* KGDB CPU roundup: */
+ kgdb_nmicallback(raw_smp_processor_id(), regs);
+ return NOTIFY_STOP;
+ }
+ /* Enter debugger: */
+ break;
+
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c
index c969fd9d1566..d91c477b3f62 100644
--- a/trunk/arch/x86/kernel/kprobes.c
+++ b/trunk/arch/x86/kernel/kprobes.c
@@ -1276,14 +1276,6 @@ static int __kprobes can_optimize(unsigned long paddr)
if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
return 0;
- /*
- * Do not optimize in the entry code due to the unstable
- * stack handling.
- */
- if ((paddr >= (unsigned long )__entry_text_start) &&
- (paddr < (unsigned long )__entry_text_end))
- return 0;
-
/* Check there is enough space for a relative jump. */
if (size - offset < RELATIVEJUMP_SIZE)
return 0;
diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c
index ff4554198981..e764fc05d700 100644
--- a/trunk/arch/x86/kernel/process.c
+++ b/trunk/arch/x86/kernel/process.c
@@ -92,31 +92,21 @@ void show_regs(struct pt_regs *regs)
void show_regs_common(void)
{
- const char *vendor, *product, *board;
+ const char *board, *product;
- vendor = dmi_get_system_info(DMI_SYS_VENDOR);
- if (!vendor)
- vendor = "";
+ board = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board)
+ board = "";
product = dmi_get_system_info(DMI_PRODUCT_NAME);
if (!product)
product = "";
- /* Board Name is optional */
- board = dmi_get_system_info(DMI_BOARD_NAME);
-
printk(KERN_CONT "\n");
- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
current->pid, current->comm, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk(KERN_CONT " ");
- printk(KERN_CONT "%s %s", vendor, product);
- if (board) {
- printk(KERN_CONT "/");
- printk(KERN_CONT "%s", board);
- }
- printk(KERN_CONT "\n");
+ init_utsname()->version, board, product);
}
void flush_thread(void)
@@ -516,7 +506,7 @@ static void poll_idle(void)
#define MWAIT_ECX_EXTENDED_INFO 0x01
#define MWAIT_EDX_C1 0xf0
-int mwait_usable(const struct cpuinfo_x86 *c)
+int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c
index 715037caeb43..fc7aae1e2bc7 100644
--- a/trunk/arch/x86/kernel/reboot.c
+++ b/trunk/arch/x86/kernel/reboot.c
@@ -285,14 +285,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
- { /* Handle problems with rebooting on VersaLogic Menlow boards */
- .callback = set_bios_reboot,
- .ident = "VersaLogic Menlow based board",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
- DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
- },
- },
{ }
};
diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c
index 08776a953487..03273b6c272c 100644
--- a/trunk/arch/x86/kernel/smpboot.c
+++ b/trunk/arch/x86/kernel/smpboot.c
@@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
connect_bsp_APIC();
setup_local_APIC();
- bsp_end_local_APIC_setup();
+ end_local_APIC_setup();
return -1;
}
@@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
if (!skip_ioapic_setup && nr_ioapics)
enable_IO_APIC();
- bsp_end_local_APIC_setup();
+ end_local_APIC_setup();
map_cpu_to_logical_apicid();
diff --git a/trunk/arch/x86/kernel/syscall_table_32.S b/trunk/arch/x86/kernel/syscall_table_32.S
index 5f181742e8f9..b35786dc9b8f 100644
--- a/trunk/arch/x86/kernel/syscall_table_32.S
+++ b/trunk/arch/x86/kernel/syscall_table_32.S
@@ -340,6 +340,3 @@ ENTRY(sys_call_table)
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64 /* 340 */
- .long sys_name_to_handle_at
- .long sys_open_by_handle_at
- .long sys_clock_adjtime
diff --git a/trunk/arch/x86/kernel/vmlinux.lds.S b/trunk/arch/x86/kernel/vmlinux.lds.S
index 6d4341d5c52a..e9f7a3c838c3 100644
--- a/trunk/arch/x86/kernel/vmlinux.lds.S
+++ b/trunk/arch/x86/kernel/vmlinux.lds.S
@@ -105,7 +105,6 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
- ENTRY_TEXT
IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
@@ -306,7 +305,7 @@ SECTIONS
}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
- PERCPU(THREAD_SIZE)
+ PERCPU(PAGE_SIZE)
#endif
. = ALIGN(PAGE_SIZE);
diff --git a/trunk/arch/x86/kvm/svm.c b/trunk/arch/x86/kvm/svm.c
index 63fec1531e89..25bd1bc5aad2 100644
--- a/trunk/arch/x86/kvm/svm.c
+++ b/trunk/arch/x86/kvm/svm.c
@@ -1150,8 +1150,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
loadsegment(fs, svm->host.fs);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
load_gs_index(svm->host.gs);
+ wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
loadsegment(gs, svm->host.gs);
#endif
@@ -2777,8 +2777,6 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
- skip_emulated_instruction(&svm->vcpu);
-
return 1;
}
diff --git a/trunk/arch/x86/kvm/trace.h b/trunk/arch/x86/kvm/trace.h
index db932760ea82..1357d7cf4ec8 100644
--- a/trunk/arch/x86/kvm/trace.h
+++ b/trunk/arch/x86/kvm/trace.h
@@ -62,21 +62,21 @@ TRACE_EVENT(kvm_hv_hypercall,
TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
TP_STRUCT__entry(
+ __field( __u16, code )
+ __field( bool, fast )
__field( __u16, rep_cnt )
__field( __u16, rep_idx )
__field( __u64, ingpa )
__field( __u64, outgpa )
- __field( __u16, code )
- __field( bool, fast )
),
TP_fast_assign(
+ __entry->code = code;
+ __entry->fast = fast;
__entry->rep_cnt = rep_cnt;
__entry->rep_idx = rep_idx;
__entry->ingpa = ingpa;
__entry->outgpa = outgpa;
- __entry->code = code;
- __entry->fast = fast;
),
TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 20e3f8702d1e..7d90ceb882a4 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -229,14 +229,15 @@ void vmalloc_sync_all(void)
for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE && address < FIXADDR_TOP;
address += PMD_SIZE) {
+
+ unsigned long flags;
struct page *page;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
pmd_t *ret;
- /* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
@@ -246,7 +247,7 @@ void vmalloc_sync_all(void)
if (!ret)
break;
}
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
}
@@ -827,13 +828,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
if (fault & VM_FAULT_OOM) {
- /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
- up_read(¤t->mm->mmap_sem);
- no_context(regs, error_code, address);
- return;
- }
-
out_of_memory(regs, error_code, address);
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c
index c14a5422e152..71a59296af80 100644
--- a/trunk/arch/x86/mm/init_64.c
+++ b/trunk/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
for (address = start; address <= end; address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address);
+ unsigned long flags;
struct page *page;
if (pgd_none(*pgd_ref))
continue;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
spinlock_t *pgt_lock;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
- /* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
spin_unlock(pgt_lock);
}
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
}
diff --git a/trunk/arch/x86/mm/numa_64.c b/trunk/arch/x86/mm/numa_64.c
index 1337c51b07d7..95ea1551eebc 100644
--- a/trunk/arch/x86/mm/numa_64.c
+++ b/trunk/arch/x86/mm/numa_64.c
@@ -780,7 +780,11 @@ void __cpuinit numa_add_cpu(int cpu)
int physnid;
int nid = NUMA_NO_NODE;
- nid = early_cpu_to_node(cpu);
+ apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+ if (apicid != BAD_APICID)
+ nid = apicid_to_node[apicid];
+ if (nid == NUMA_NO_NODE)
+ nid = early_cpu_to_node(cpu);
BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
/*
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index 90825f2eb0f4..d343b3c81f3c 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -57,10 +57,12 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
void update_page_count(int level, unsigned long pages)
{
+ unsigned long flags;
+
/* Protect against CPA */
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
direct_pages_count[level] += pages;
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
static void split_page_count(int level)
@@ -392,7 +394,7 @@ static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
struct cpa_data *cpa)
{
- unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
+ unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
pte_t new_pte, old_pte, *tmp;
pgprot_t old_prot, new_prot, req_prot;
int i, do_split = 1;
@@ -401,7 +403,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
if (cpa->force_split)
return 1;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
/*
* Check for races, another CPU might have split this page
* up already:
@@ -496,14 +498,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
}
out_unlock:
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
return do_split;
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
- unsigned long pfn, pfninc = 1;
+ unsigned long flags, pfn, pfninc = 1;
unsigned int i, level;
pte_t *pbase, *tmp;
pgprot_t ref_prot;
@@ -517,7 +519,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
if (!base)
return -ENOMEM;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
/*
* Check for races, another CPU might have split this page
* up for us already:
@@ -589,7 +591,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
*/
if (base)
__free_page(base);
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
return 0;
}
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 0113d19c8aa6..500242d3c96d 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -121,12 +121,14 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
static void pgd_dtor(pgd_t *pgd)
{
+ unsigned long flags; /* can be called from interrupt context */
+
if (SHARED_KERNEL_PMD)
return;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
@@ -258,6 +260,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pmd_t *pmds[PREALLOCATED_PMDS];
+ unsigned long flags;
pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
@@ -277,12 +280,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
* respect to anything walking the pgd_list, so that they
* never see a partially populated pgd.
*/
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
pgd_ctor(mm, pgd);
pgd_prepopulate_pmd(mm, pgd, pmds);
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
return pgd;
diff --git a/trunk/arch/x86/pci/ce4100.c b/trunk/arch/x86/pci/ce4100.c
index 9260b3eb18d4..85b68ef5e809 100644
--- a/trunk/arch/x86/pci/ce4100.c
+++ b/trunk/arch/x86/pci/ce4100.c
@@ -34,7 +34,6 @@
#include
#include
-#include
#include
struct sim_reg {
@@ -307,10 +306,10 @@ struct pci_raw_ops ce4100_pci_conf = {
.write = ce4100_conf_write,
};
-int __init ce4100_pci_init(void)
+static int __init ce4100_pci_init(void)
{
init_sim_regs();
raw_pci_ops = &ce4100_pci_conf;
- /* Indicate caller that it should invoke pci_legacy_init() */
- return 1;
+ return 0;
}
+subsys_initcall(ce4100_pci_init);
diff --git a/trunk/arch/x86/pci/xen.c b/trunk/arch/x86/pci/xen.c
index 8c4085a95ef1..25cd4a07d09f 100644
--- a/trunk/arch/x86/pci/xen.c
+++ b/trunk/arch/x86/pci/xen.c
@@ -20,8 +20,7 @@
#include
#ifdef CONFIG_ACPI
-static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
- int trigger, int polarity)
+static int xen_hvm_register_pirq(u32 gsi, int triggering)
{
int rc, irq;
struct physdev_map_pirq map_irq;
@@ -42,7 +41,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
return -1;
}
- if (trigger == ACPI_EDGE_SENSITIVE) {
+ if (triggering == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
@@ -56,6 +55,12 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
return irq;
}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_hvm_register_pirq(gsi, trigger);
+}
#endif
#if defined(CONFIG_PCI_MSI)
@@ -86,7 +91,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- int irq, pirq;
+ int irq, pirq, ret = 0;
struct msi_desc *msidesc;
struct msi_msg msg;
@@ -94,32 +99,39 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
__read_msi_msg(msidesc, &msg);
pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0)
+ if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) {
+ xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ);
+ if (irq < 0)
goto error;
- xen_msi_compose_msg(dev, pirq, &msg);
- __write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_while;
+ printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d"
+ " pirq=%d\n", irq, pirq);
+ return 0;
}
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
- (type == PCI_CAP_ID_MSIX) ?
- "msi-x" : "msi");
- if (irq < 0)
+ xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ));
+ if (irq < 0 || pirq < 0)
goto error;
- dev_dbg(&dev->dev,
- "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
+ printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
+ xen_msi_compose_msg(dev, pirq, &msg);
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_while;
+ write_msi_msg(irq, &msg);
}
return 0;
+error_while:
+ unbind_from_irqhandler(irq, NULL);
error:
- dev_err(&dev->dev,
- "Xen PCI frontend has not registered MSI/MSI-X support!\n");
- return -ENODEV;
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
+
+ return ret;
}
/*
@@ -138,26 +150,35 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return -ENOMEM;
if (type == PCI_CAP_ID_MSIX)
- ret = xen_pci_frontend_enable_msix(dev, v, nvec);
+ ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
else
- ret = xen_pci_frontend_enable_msi(dev, v);
+ ret = xen_pci_frontend_enable_msi(dev, &v);
if (ret)
goto error;
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
- (type == PCI_CAP_ID_MSIX) ?
- "pcifront-msi-x" :
- "pcifront-msi");
- if (irq < 0)
+ irq = xen_allocate_pirq(v[i], 0, /* not sharable */
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" : "pcifront-msi");
+ if (irq < 0) {
+ ret = -1;
goto free;
+ }
+
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error_while;
i++;
}
kfree(v);
return 0;
+error_while:
+ unbind_from_irqhandler(irq, NULL);
error:
- dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
free:
kfree(v);
return ret;
@@ -172,9 +193,6 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
xen_pci_frontend_disable_msix(dev);
else
xen_pci_frontend_disable_msi(dev);
-
- /* Free the IRQ's and the msidesc using the generic code. */
- default_teardown_msi_irqs(dev);
}
static void xen_teardown_msi_irq(unsigned int irq)
@@ -182,82 +200,47 @@ static void xen_teardown_msi_irq(unsigned int irq)
xen_destroy_irq(irq);
}
-#ifdef CONFIG_XEN_DOM0
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
- int ret = 0;
+ int irq, ret;
struct msi_desc *msidesc;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- struct physdev_map_pirq map_irq;
-
- memset(&map_irq, 0, sizeof(map_irq));
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
- map_irq.index = -1;
- map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
- map_irq.devfn = dev->devfn;
-
- if (type == PCI_CAP_ID_MSIX) {
- int pos;
- u32 table_offset, bir;
-
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
- pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
- map_irq.table_base = pci_resource_start(dev, bir);
- map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
- }
-
- ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (ret) {
- dev_warn(&dev->dev, "xen map irq failed %d\n", ret);
- goto out;
- }
+ irq = xen_create_msi_irq(dev, msidesc, type);
+ if (irq < 0)
+ return -1;
- ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
- map_irq.pirq, map_irq.index,
- (type == PCI_CAP_ID_MSIX) ?
- "msi-x" : "msi");
- if (ret < 0)
- goto out;
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error;
}
- ret = 0;
-out:
+ return 0;
+
+error:
+ xen_destroy_irq(irq);
return ret;
}
#endif
-#endif
static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
int rc;
int share = 1;
- u8 gsi;
- rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
- rc);
- return rc;
- }
+ dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
- if (gsi < NR_IRQS_LEGACY)
+ if (dev->irq < 0)
+ return -EINVAL;
+
+ if (dev->irq < NR_IRQS_LEGACY)
share = 0;
- rc = xen_allocate_pirq(gsi, share, "pcifront");
+ rc = xen_allocate_pirq(dev->irq, share, "pcifront");
if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
- gsi, rc);
+ dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
+ dev->irq, rc);
return rc;
}
-
- dev->irq = rc;
- dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
return 0;
}
diff --git a/trunk/arch/x86/platform/ce4100/ce4100.c b/trunk/arch/x86/platform/ce4100/ce4100.c
index cd6f184c3b3f..d2c0d51a7178 100644
--- a/trunk/arch/x86/platform/ce4100/ce4100.c
+++ b/trunk/arch/x86/platform/ce4100/ce4100.c
@@ -15,7 +15,6 @@
#include
#include
-#include
#include
#include
@@ -130,5 +129,4 @@ void __init x86_ce4100_early_setup(void)
x86_init.resources.probe_roms = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
x86_init.mpparse.find_smp_config = sdv_find_smp_config;
- x86_init.pci.init = ce4100_pci_init;
}
diff --git a/trunk/arch/x86/platform/olpc/olpc_dt.c b/trunk/arch/x86/platform/olpc/olpc_dt.c
index 044bda5b3174..dab874647530 100644
--- a/trunk/arch/x86/platform/olpc/olpc_dt.c
+++ b/trunk/arch/x86/platform/olpc/olpc_dt.c
@@ -140,7 +140,8 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers.
*/
res = alloc_bootmem(chunk_size);
- BUG_ON(!res);
+ if (!res)
+ return NULL;
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
free_mem = chunk_size;
diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c
index a7b38d35c29a..df58e9cad96a 100644
--- a/trunk/arch/x86/platform/uv/tlb_uv.c
+++ b/trunk/arch/x86/platform/uv/tlb_uv.c
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
memset(bd2, 0, sizeof(struct bau_desc));
bd2->header.sw_ack_flag = 1;
/*
- * base_dest_nodeid is the nasid of the first uvhub
+ * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
* in the partition. The bit map will indicate uvhub numbers,
* which are 0-N in a partition. Pnodes are unique system-wide.
*/
- bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
+ bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
bd2->header.dest_subnodeid = 0x10; /* the LB */
bd2->header.command = UV_NET_ENDPOINT_INTD;
bd2->header.int_both = 1;
diff --git a/trunk/arch/x86/xen/Kconfig b/trunk/arch/x86/xen/Kconfig
index e4343fe488ed..5b54892e4bc3 100644
--- a/trunk/arch/x86/xen/Kconfig
+++ b/trunk/arch/x86/xen/Kconfig
@@ -48,11 +48,3 @@ config XEN_DEBUG_FS
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
-
-config XEN_DEBUG
- bool "Enable Xen debug checks"
- depends on XEN
- default n
- help
- Enable various WARN_ON checks in the Xen MMU code.
- Enabling this option WILL incur a significant performance overhead.
diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c
index 49dbd78ec3cb..50542efe45fb 100644
--- a/trunk/arch/x86/xen/enlighten.c
+++ b/trunk/arch/x86/xen/enlighten.c
@@ -1284,14 +1284,15 @@ static int init_hvm_pv_info(int *major, int *minor)
xen_setup_features();
- pv_info.name = "Xen HVM";
+ pv_info = xen_info;
+ pv_info.kernel_rpl = 0;
xen_domain_type = XEN_HVM_DOMAIN;
return 0;
}
-void __ref xen_hvm_init_shared_info(void)
+void xen_hvm_init_shared_info(void)
{
int cpu;
struct xen_add_to_physmap xatp;
@@ -1330,8 +1331,6 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
- if (xen_have_vector_callback)
- xen_init_lock_cpu(cpu);
break;
default:
break;
@@ -1356,7 +1355,6 @@ static void __init xen_hvm_guest_init(void)
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
- xen_hvm_smp_init();
register_cpu_notifier(&xen_hvm_cpu_notifier);
xen_unplug_emulated_devices();
have_vcpu_info_placement = 0;
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index 832765c0fb8c..5e92b61ad574 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -46,7 +46,6 @@
#include
#include
#include
-#include
#include
#include
@@ -417,12 +416,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
- unsigned long mfn;
+ unsigned long mfn = pfn_to_mfn(pfn);
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- mfn = get_phys_to_machine(pfn);
- else
- mfn = pfn;
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
@@ -432,18 +427,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (unlikely(mfn == INVALID_P2M_ENTRY)) {
mfn = 0;
flags = 0;
- } else {
- /*
- * Paramount to do this test _after_ the
- * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
- * IDENTITY_FRAME_BIT resolves to true.
- */
- mfn &= ~FOREIGN_FRAME_BIT;
- if (mfn & IDENTITY_FRAME_BIT) {
- mfn &= ~IDENTITY_FRAME_BIT;
- flags |= _PAGE_IOMAP;
- }
}
+
val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
@@ -547,41 +532,6 @@ pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
- phys_addr_t addr = (pte & PTE_PFN_MASK);
- phys_addr_t other_addr;
- bool io_page = false;
- pte_t _pte;
-
- if (pte & _PAGE_IOMAP)
- io_page = true;
-
- _pte = xen_make_pte(pte);
-
- if (!addr)
- return _pte;
-
- if (io_page &&
- (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
- other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN(addr != other_addr,
- "0x%lx is using VM_IO, but it is 0x%lx!\n",
- (unsigned long)addr, (unsigned long)other_addr);
- } else {
- pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
- other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN((addr == other_addr) && (!io_page) && (!iomap_set),
- "0x%lx is missing VM_IO (and wasn't fixed)!\n",
- (unsigned long)addr);
- }
-
- return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
@@ -1036,9 +986,10 @@ static void xen_pgd_pin(struct mm_struct *mm)
*/
void xen_mm_pin_all(void)
{
+ unsigned long flags;
struct page *page;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru) {
if (!PagePinned(page)) {
@@ -1047,7 +998,7 @@ void xen_mm_pin_all(void)
}
}
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
@@ -1148,9 +1099,10 @@ static void xen_pgd_unpin(struct mm_struct *mm)
*/
void xen_mm_unpin_all(void)
{
+ unsigned long flags;
struct page *page;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru) {
if (PageSavePinned(page)) {
@@ -1160,7 +1112,7 @@ void xen_mm_unpin_all(void)
}
}
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
@@ -1990,9 +1942,6 @@ __init void xen_ident_map_ISA(void)
static __init void xen_post_allocator_init(void)
{
-#ifdef CONFIG_XEN_DEBUG
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2125,7 +2074,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
in_frames[i] = virt_to_mfn(vaddr);
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
- __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+ set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
if (out_frames)
out_frames[i] = virt_to_pfn(vaddr);
@@ -2404,18 +2353,6 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
- .open = p2m_dump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static struct dentry *d_mmu_debug;
static int __init xen_mmu_debugfs(void)
@@ -2471,7 +2408,6 @@ static int __init xen_mmu_debugfs(void)
debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
&mmu_stats.prot_commit_batched);
- debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
return 0;
}
fs_initcall(xen_mmu_debugfs);
diff --git a/trunk/arch/x86/xen/p2m.c b/trunk/arch/x86/xen/p2m.c
index 215a3ce61068..fd12d7ce7ff9 100644
--- a/trunk/arch/x86/xen/p2m.c
+++ b/trunk/arch/x86/xen/p2m.c
@@ -23,129 +23,6 @@
* P2M_PER_PAGE depends on the architecture, as a mfn is always
* unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
* 512 and 1024 entries respectively.
- *
- * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
- *
- * However not all entries are filled with MFNs. Specifically for all other
- * leaf entries, or for the top root, or middle one, for which there is a void
- * entry, we assume it is "missing". So (for example)
- * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
- *
- * We also have the possibility of setting 1-1 mappings on certain regions, so
- * that:
- * pfn_to_mfn(0xc0000)=0xc0000
- *
- * The benefit of this is that for non-RAM regions (think
- * PCI BARs, or ACPI spaces) we can create mappings easily because
- * the PFN value matches the MFN.
- *
- * For this to work efficiently we have one new page p2m_identity and
- * allocate (via reserve_brk) any other pages we need to cover the sides
- * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
- * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs,
- * no other fancy value).
- *
- * On lookup we spot that the entry points to p2m_identity and return the
- * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
- * If the entry points to an allocated page, we just proceed as before and
- * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
- * appropriate functions (pfn_to_mfn).
- *
- * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
- * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
- * non-identity pfn. To protect ourselves against that, we elect to set (and get) the
- * IDENTITY_FRAME_BIT on all identity mapped PFNs.
- *
- * This simplistic diagram is used to explain the more subtle piece of code.
- * There is also a diagram of the P2M at the end that can help.
- * Imagine your E820 looking as so:
- *
- * 1GB 2GB
- * /-------------------+---------\/----\ /----------\ /---+-----\
- * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
- * \-------------------+---------/\----/ \----------/ \---+-----/
- * ^- 1029MB ^- 2001MB
- *
- * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
- * 2048MB = 524288 (0x80000)]
- *
- * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
- * is actually not present (would have to kick the balloon driver to put it in).
- *
- * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
- * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
- * of the PFN and the end PFN (263424 and 512256 respectively). The first step
- * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
- * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
- * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn
- * to end pfn. We reserve_brk top leaf pages if they are missing (means they
- * point to p2m_mid_missing).
- *
- * With the E820 example above, 263424 is not 1GB aligned so we allocate a
- * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
- * Each entry in the allocated page is "missing" (points to p2m_missing).
- *
- * Next stage is to determine if we need to do a more granular boundary check
- * on the 4MB (or 2MB depending on architecture) off the start and end pfn's.
- * We check if the start pfn and end pfn violate that boundary check, and if
- * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer
- * granularity of setting which PFNs are missing and which ones are identity.
- * In our example 263424 and 512256 both fail the check so we reserve_brk two
- * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
- * values) and assign them to p2m[1][2] and p2m[1][488] respectively.
- *
- * At this point we would at minimum reserve_brk one page, but could be up to
- * three. Each call to set_phys_range_identity has at maximum a three page
- * cost. If we were to query the P2M at this stage, all those entries from
- * start PFN through end PFN (so 1029MB -> 2001MB) would return
- * INVALID_P2M_ENTRY ("missing").
- *
- * The next step is to walk from the start pfn to the end pfn setting
- * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
- * If we find that the middle leaf is pointing to p2m_missing we can swap it
- * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this
- * point we do not need to worry about boundary alignment (so no need to
- * reserve_brk a middle page, figure out which PFNs are "missing" and which
- * ones are identity), as that has been done earlier. If we find that the
- * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
- * that page (which covers 512 PFNs) and set the appropriate PFN with
- * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
- * set from p2m[1][2][256->511] and p2m[1][488][0->256] with
- * IDENTITY_FRAME_BIT set.
- *
- * All other regions that are void (or not filled) either point to p2m_missing
- * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
- * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
- * contain the INVALID_P2M_ENTRY value and are considered "missing."
- *
- * This is what the p2m ends up looking (for the E820 above) with this
- * fabulous drawing:
- *
- * p2m /--------------\
- * /-----\ | &mfn_list[0],| /-----------------\
- * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. |
- * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] |
- * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] |
- * |-----| \ | [p2m_identity]+\\ | .... |
- * | 2 |--\ \-------------------->| ... | \\ \----------------/
- * |-----| \ \---------------/ \\
- * | 3 |\ \ \\ p2m_identity
- * |-----| \ \-------------------->/---------------\ /-----------------\
- * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... |
- * \-----/ / | [p2m_identity]+-->| ..., ~0 |
- * / /---------------\ | .... | \-----------------/
- * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. |
- * / | IDENTITY[@256]|<----/ \---------------/
- * / | ~0, ~0, .... |
- * | \---------------/
- * |
- * p2m_missing p2m_missing
- * /------------------\ /------------\
- * | [p2m_mid_missing]+---->| ~0, ~0, ~0 |
- * | [p2m_mid_missing]+---->| ..., ~0 |
- * \------------------/ \------------/
- *
- * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
*/
#include
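
Editorial aside, not part of the patch: the index arithmetic in the long P2M comment removed in the hunk above can be sanity-checked with a small stand-alone sketch. It assumes the 64-bit layout that comment describes (P2M_PER_PAGE and P2M_MID_PER_PAGE both 512) and reuses the E820 example's start PFN, 263424 (1029MB), which the comment places in p2m[1][2][256].

#include <stdio.h>

/* Assumed 64-bit values from the removed comment: 4096-byte pages holding
 * 8-byte mfns give 512 entries per leaf page and per mid page. */
#define P2M_PER_PAGE      512UL
#define P2M_MID_PER_PAGE  512UL

int main(void)
{
	unsigned long pfn = 263424UL;	/* 1029MB, start of the example's non-RAM gap */
	unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx    = pfn % P2M_PER_PAGE;

	/* Expected output: pfn 263424 -> p2m[1][2][256] */
	printf("pfn %lu -> p2m[%lu][%lu][%lu]\n", pfn, topidx, mididx, idx);
	return 0;
}

Compiled and run, it should print pfn 263424 -> p2m[1][2][256]; the example's end PFN, 512256, works out the same way to p2m[1][488][256], matching the comment's p2m[1][488][0->256] range.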
@@ -153,7 +30,6 @@
#include
#include
#include
-#include
#include
#include
@@ -183,15 +59,9 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
-
RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-/* We might hit two boundary violations at the start and end, at max each
- * boundary violation will require three middle nodes. */
-RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
-
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@@ -266,7 +136,7 @@ static void p2m_init(unsigned long *p2m)
* - After resume we're called from within stop_machine, but the mfn
 * tree should already be completely allocated.
*/
-void __ref xen_build_mfn_list_list(void)
+void xen_build_mfn_list_list(void)
{
unsigned long pfn;
@@ -351,9 +221,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_top_init(p2m_top);
- p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m_identity);
-
/*
* The domain builder gives us a pre-constructed p2m array in
* mfn_list for all the pages initially given to us, so we just
@@ -399,14 +266,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- /*
- * The INVALID_P2M_ENTRY is filled in both p2m_*identity
- * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
- * would be wrong.
- */
- if (p2m_top[topidx][mididx] == p2m_identity)
- return IDENTITY_FRAME(pfn);
-
return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
@@ -476,11 +335,9 @@ static bool alloc_p2m(unsigned long pfn)
p2m_top_mfn_p[topidx] = mid_mfn;
}
- if (p2m_top[topidx][mididx] == p2m_identity ||
- p2m_top[topidx][mididx] == p2m_missing) {
+ if (p2m_top[topidx][mididx] == p2m_missing) {
/* p2m leaf page is missing */
unsigned long *p2m;
- unsigned long *p2m_orig = p2m_top[topidx][mididx];
p2m = alloc_p2m_page();
if (!p2m)
@@ -488,7 +345,7 @@ static bool alloc_p2m(unsigned long pfn)
p2m_init(p2m);
- if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
+ if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
free_p2m_page(p2m);
else
mid_mfn[mididx] = virt_to_mfn(p2m);
@@ -497,91 +354,11 @@ static bool alloc_p2m(unsigned long pfn)
return true;
}
-bool __early_alloc_p2m(unsigned long pfn)
-{
- unsigned topidx, mididx, idx;
-
- topidx = p2m_top_index(pfn);
- mididx = p2m_mid_index(pfn);
- idx = p2m_index(pfn);
-
- /* Pfff.. No boundary cross-over, let's get out. */
- if (!idx)
- return false;
-
- WARN(p2m_top[topidx][mididx] == p2m_identity,
- "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
- topidx, mididx);
-
- /*
- * Could be done by xen_build_dynamic_phys_to_machine..
- */
- if (p2m_top[topidx][mididx] != p2m_missing)
- return false;
-
- /* Boundary cross-over for the edges: */
- if (idx) {
- unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_init(p2m);
-
- p2m_top[topidx][mididx] = p2m;
-
- }
- return idx != 0;
-}
-unsigned long set_phys_range_identity(unsigned long pfn_s,
- unsigned long pfn_e)
-{
- unsigned long pfn;
-
- if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
- return 0;
-
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
- return pfn_e - pfn_s;
-
- if (pfn_s > pfn_e)
- return 0;
-
- for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
- pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
- pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
- {
- unsigned topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == p2m_mid_missing) {
- unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid);
-
- p2m_top[topidx] = mid;
- }
- }
-
- __early_alloc_p2m(pfn_s);
- __early_alloc_p2m(pfn_e);
-
- for (pfn = pfn_s; pfn < pfn_e; pfn++)
- if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
- break;
-
- if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
- "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
- (pfn_e - pfn_s) - (pfn - pfn_s)))
- printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
-
- return pfn - pfn_s;
-}
-
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
unsigned topidx, mididx, idx;
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return true;
- }
if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
@@ -591,21 +368,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
- /* For sparse holes where the p2m leaf has a real PFN along with
- * PCI holes, stick in the PFN as the MFN value.
- */
- if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
- if (p2m_top[topidx][mididx] == p2m_identity)
- return true;
-
- /* Swap over from MISSING to IDENTITY if needed. */
- if (p2m_top[topidx][mididx] == p2m_missing) {
- WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
- p2m_identity) != p2m_missing);
- return true;
- }
- }
-
if (p2m_top[topidx][mididx] == p2m_missing)
return mfn == INVALID_P2M_ENTRY;
@@ -616,6 +378,11 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
+ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return true;
+ }
+
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!alloc_p2m(pfn))
return false;
@@ -654,7 +421,7 @@ int m2p_add_override(unsigned long mfn, struct page *page)
{
unsigned long flags;
unsigned long pfn;
- unsigned long uninitialized_var(address);
+ unsigned long address;
unsigned level;
pte_t *ptep = NULL;
@@ -688,7 +455,7 @@ int m2p_remove_override(struct page *page)
unsigned long flags;
unsigned long mfn;
unsigned long pfn;
- unsigned long uninitialized_var(address);
+ unsigned long address;
unsigned level;
pte_t *ptep = NULL;
@@ -753,80 +520,3 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
return ret;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-
-int p2m_dump_show(struct seq_file *m, void *v)
-{
- static const char * const level_name[] = { "top", "middle",
- "entry", "abnormal" };
- static const char * const type_name[] = { "identity", "missing",
- "pfn", "abnormal"};
-#define TYPE_IDENTITY 0
-#define TYPE_MISSING 1
-#define TYPE_PFN 2
-#define TYPE_UNKNOWN 3
- unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
- unsigned int uninitialized_var(prev_level);
- unsigned int uninitialized_var(prev_type);
-
- if (!p2m_top)
- return 0;
-
- for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
- unsigned topidx = p2m_top_index(pfn);
- unsigned mididx = p2m_mid_index(pfn);
- unsigned idx = p2m_index(pfn);
- unsigned lvl, type;
-
- lvl = 4;
- type = TYPE_UNKNOWN;
- if (p2m_top[topidx] == p2m_mid_missing) {
- lvl = 0; type = TYPE_MISSING;
- } else if (p2m_top[topidx] == NULL) {
- lvl = 0; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == NULL) {
- lvl = 1; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx] == p2m_identity) {
- lvl = 1; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx] == p2m_missing) {
- lvl = 1; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == 0) {
- lvl = 2; type = TYPE_UNKNOWN;
- } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
- lvl = 2; type = TYPE_IDENTITY;
- } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
- lvl = 2; type = TYPE_MISSING;
- } else if (p2m_top[topidx][mididx][idx] == pfn) {
- lvl = 2; type = TYPE_PFN;
- } else if (p2m_top[topidx][mididx][idx] != pfn) {
- lvl = 2; type = TYPE_PFN;
- }
- if (pfn == 0) {
- prev_level = lvl;
- prev_type = type;
- }
- if (pfn == MAX_DOMAIN_PAGES-1) {
- lvl = 3;
- type = TYPE_UNKNOWN;
- }
- if (prev_type != type) {
- seq_printf(m, " [0x%lx->0x%lx] %s\n",
- prev_pfn_type, pfn, type_name[prev_type]);
- prev_pfn_type = pfn;
- prev_type = type;
- }
- if (prev_level != lvl) {
- seq_printf(m, " [0x%lx->0x%lx] level %s\n",
- prev_pfn_level, pfn, level_name[prev_level]);
- prev_pfn_level = pfn;
- prev_level = lvl;
- }
- }
- return 0;
-#undef TYPE_IDENTITY
-#undef TYPE_MISSING
-#undef TYPE_PFN
-#undef TYPE_UNKNOWN
-}
-#endif
diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c
index fa0269a99377..a8a66a50d446 100644
--- a/trunk/arch/x86/xen/setup.c
+++ b/trunk/arch/x86/xen/setup.c
@@ -52,8 +52,6 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
static __init void xen_add_extra_mem(unsigned long pages)
{
- unsigned long pfn;
-
u64 size = (u64)pages * PAGE_SIZE;
u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
@@ -68,9 +66,6 @@ static __init void xen_add_extra_mem(unsigned long pages)
xen_extra_mem_size += size;
xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
-
- for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
@@ -109,7 +104,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n",
start, end, ret);
if (ret == 1) {
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
len++;
}
}
@@ -143,55 +138,12 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
return released;
}
-static unsigned long __init xen_set_identity(const struct e820entry *list,
- ssize_t map_size)
-{
- phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
- phys_addr_t start_pci = last;
- const struct e820entry *entry;
- unsigned long identity = 0;
- int i;
-
- for (i = 0, entry = list; i < map_size; i++, entry++) {
- phys_addr_t start = entry->addr;
- phys_addr_t end = start + entry->size;
-
- if (start < last)
- start = last;
-
- if (end <= start)
- continue;
-
- /* Skip over the 1MB region. */
- if (last > end)
- continue;
-
- if (entry->type == E820_RAM) {
- if (start > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(start));
-
- /* Without saving 'last' we would gobble RAM too
- * at the end of the loop. */
- last = end;
- start_pci = end;
- continue;
- }
- start_pci = min(start, start_pci);
- last = end;
- }
- if (last > start_pci)
- identity += set_phys_range_identity(
- PFN_UP(start_pci), PFN_DOWN(last));
- return identity;
-}
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
- static struct e820entry map_raw[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end;
@@ -199,7 +151,6 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long extra_pages = 0;
unsigned long extra_limit;
- unsigned long identity_pages = 0;
int i;
int op;
@@ -225,7 +176,6 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
- memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
xen_extra_mem_start = mem_end;
for (i = 0; i < memmap.nr_entries; i++) {
@@ -244,15 +194,6 @@ char * __init xen_memory_setup(void)
end -= delta;
extra_pages += PFN_DOWN(delta);
- /*
- * Set RAM below 4GB that is not for us to be unusable.
- * This prevents "System RAM" address space from being
- * used as potential resource for I/O address (happens
- * when 'allocate_resource' is called).
- */
- if (delta &&
- (xen_initial_domain() && end < 0x100000000ULL))
- e820_add_region(end, delta, E820_UNUSABLE);
}
if (map[i].size > 0 && end > xen_extra_mem_start)
@@ -310,13 +251,6 @@ char * __init xen_memory_setup(void)
xen_add_extra_mem(extra_pages);
- /*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs. We supply it with the non-sanitized version
- * of the E820.
- */
- identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
return "Xen";
}
diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c
index 30612441ed99..72a4c7959045 100644
--- a/trunk/arch/x86/xen/smp.c
+++ b/trunk/arch/x86/xen/smp.c
@@ -509,41 +509,3 @@ void __init xen_smp_init(void)
xen_fill_possible_map();
xen_init_spinlocks();
}
-
-static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
-{
- native_smp_prepare_cpus(max_cpus);
- WARN_ON(xen_smp_intr_init(0));
-
- if (!xen_have_vector_callback)
- return;
- xen_init_lock_cpu(0);
- xen_init_spinlocks();
-}
-
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
-{
- int rc;
- rc = native_cpu_up(cpu);
- WARN_ON (xen_smp_intr_init(cpu));
- return rc;
-}
-
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
- unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- native_cpu_die(cpu);
-}
-
-void __init xen_hvm_smp_init(void)
-{
- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
- smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
- smp_ops.cpu_up = xen_hvm_cpu_up;
- smp_ops.cpu_die = xen_hvm_cpu_die;
- smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
- smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-}
diff --git a/trunk/arch/x86/xen/suspend.c b/trunk/arch/x86/xen/suspend.c
index 45329c8c226e..9bbd63a129b5 100644
--- a/trunk/arch/x86/xen/suspend.c
+++ b/trunk/arch/x86/xen/suspend.c
@@ -12,7 +12,7 @@
#include "xen-ops.h"
#include "mmu.h"
-void xen_arch_pre_suspend(void)
+void xen_pre_suspend(void)
{
xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
xen_start_info->console.domU.mfn =
@@ -26,9 +26,8 @@ void xen_arch_pre_suspend(void)
BUG();
}
-void xen_arch_hvm_post_suspend(int suspend_cancelled)
+void xen_hvm_post_suspend(int suspend_cancelled)
{
-#ifdef CONFIG_XEN_PVHVM
int cpu;
xen_hvm_init_shared_info();
xen_callback_vector();
@@ -38,10 +37,9 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
xen_setup_runstate_info(cpu);
}
}
-#endif
}
-void xen_arch_post_suspend(int suspend_cancelled)
+void xen_post_suspend(int suspend_cancelled)
{
xen_build_mfn_list_list();
diff --git a/trunk/arch/x86/xen/time.c b/trunk/arch/x86/xen/time.c
index 2e2d370a47b1..067759e3d6a5 100644
--- a/trunk/arch/x86/xen/time.c
+++ b/trunk/arch/x86/xen/time.c
@@ -397,9 +397,7 @@ void xen_setup_timer(int cpu)
name = "";
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
- IRQF_DISABLED|IRQF_PERCPU|
- IRQF_NOBALANCING|IRQF_TIMER|
- IRQF_FORCE_RESUME,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
name, NULL);
evt = &per_cpu(xen_clock_events, cpu);
diff --git a/trunk/arch/x86/xen/xen-ops.h b/trunk/arch/x86/xen/xen-ops.h
index 3112f55638c4..9d41bf985757 100644
--- a/trunk/arch/x86/xen/xen-ops.h
+++ b/trunk/arch/x86/xen/xen-ops.h
@@ -64,12 +64,10 @@ void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
void xen_smp_init(void);
-void __init xen_hvm_smp_init(void);
extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
-static inline void xen_hvm_smp_init(void) {}
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
diff --git a/trunk/arch/xtensa/include/asm/rwsem.h b/trunk/arch/xtensa/include/asm/rwsem.h
index 249619e7e7f2..e39edf5c86f2 100644
--- a/trunk/arch/xtensa/include/asm/rwsem.h
+++ b/trunk/arch/xtensa/include/asm/rwsem.h
@@ -17,12 +17,44 @@
#error "Please don't include directly, use instead."
#endif
+#include
+#include
+#include
+#include
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ signed long count;
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
/*
* lock for reading
@@ -128,4 +160,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
#endif /* _XTENSA_RWSEM_H */
diff --git a/trunk/arch/xtensa/kernel/time.c b/trunk/arch/xtensa/kernel/time.c
index f3e5eb43f71c..19df764f6399 100644
--- a/trunk/arch/xtensa/kernel/time.c
+++ b/trunk/arch/xtensa/kernel/time.c
@@ -96,12 +96,16 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
#endif
- xtime_update(1); /* Linux handler in kernel/time/timekeeping */
+ write_seqlock(&xtime_lock);
+
+ do_timer(1); /* Linux handler in kernel/timer.c */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY;
set_linux_timer(next);
+
+ write_sequnlock(&xtime_lock);
}
/* Allow platform to do something useful (Wdog). */
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 518dd423a5fe..2f4002f79a24 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -403,14 +403,13 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
*
*/
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
{
blk_remove_plug(q);
@@ -424,7 +423,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
@@ -447,7 +446,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -1054,7 +1053,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -2611,6 +2610,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/trunk/block/blk-flush.c b/trunk/block/blk-flush.c
index b27d0208611b..54b123d6563e 100644
--- a/trunk/block/blk-flush.c
+++ b/trunk/block/blk-flush.c
@@ -66,12 +66,10 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
/*
* Moving a request silently to empty queue_head may stall the
- * queue. Kick the queue in those cases. This function is called
- * from request completion path and calling directly into
- * request_fn may confuse the driver. Always use kblockd.
+ * queue. Kick the queue in those cases.
*/
if (was_empty && next_rq)
- __blk_run_queue(q, true);
+ __blk_run_queue(q);
}
static void pre_flush_end_io(struct request *rq, int error)
@@ -132,7 +130,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
BUG();
}
- elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
+ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
return rq;
}
diff --git a/trunk/block/blk-lib.c b/trunk/block/blk-lib.c
index bd3e8df4d5e2..1a320d2406b0 100644
--- a/trunk/block/blk-lib.c
+++ b/trunk/block/blk-lib.c
@@ -109,6 +109,7 @@ struct bio_batch
atomic_t done;
unsigned long flags;
struct completion *wait;
+ bio_end_io_t *end_io;
};
static void bio_batch_end_io(struct bio *bio, int err)
@@ -121,14 +122,17 @@ static void bio_batch_end_io(struct bio *bio, int err)
else
clear_bit(BIO_UPTODATE, &bb->flags);
}
- if (bb)
- if (atomic_dec_and_test(&bb->done))
- complete(bb->wait);
+ if (bb) {
+ if (bb->end_io)
+ bb->end_io(bio, err);
+ atomic_inc(&bb->done);
+ complete(bb->wait);
+ }
bio_put(bio);
}
/**
- * blkdev_issue_zeroout - generate number of zero filed write bios
+ * blkdev_issue_zeroout generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write
@@ -146,12 +150,13 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
int ret;
struct bio *bio;
struct bio_batch bb;
- unsigned int sz;
+ unsigned int sz, issued = 0;
DECLARE_COMPLETION_ONSTACK(wait);
- atomic_set(&bb.done, 1);
+ atomic_set(&bb.done, 0);
bb.flags = 1 << BIO_UPTODATE;
bb.wait = &wait;
+ bb.end_io = NULL;
submit:
ret = 0;
@@ -180,12 +185,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
break;
}
ret = 0;
- atomic_inc(&bb.done);
+ issued++;
submit_bio(WRITE, bio);
}
/* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
+ while (issued != atomic_read(&bb.done))
wait_for_completion(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
diff --git a/trunk/block/blk-throttle.c b/trunk/block/blk-throttle.c
index e36cc10a346c..381b09bb562b 100644
--- a/trunk/block/blk-throttle.c
+++ b/trunk/block/blk-throttle.c
@@ -20,11 +20,6 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
-/* A workqueue to queue throttle related work */
-static struct workqueue_struct *kthrotld_workqueue;
-static void throtl_schedule_delayed_work(struct throtl_data *td,
- unsigned long delay);
-
struct throtl_rb_root {
struct rb_root rb;
struct rb_node *left;
@@ -173,15 +168,7 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
* tree of blkg (instead of traversing through hash list all
* the time.
*/
-
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- tg = &td->root_tg;
- else
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
/* Fill in device details for root group */
if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
@@ -350,9 +337,10 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
update_min_dispatch_time(st);
if (time_before_eq(st->min_disptime, jiffies))
- throtl_schedule_delayed_work(td, 0);
+ throtl_schedule_delayed_work(td->queue, 0);
else
- throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
+ throtl_schedule_delayed_work(td->queue,
+ (st->min_disptime - jiffies));
}
static inline void
@@ -819,10 +807,10 @@ void blk_throtl_work(struct work_struct *work)
}
/* Call with queue lock held */
-static void
-throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
+void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
{
+ struct throtl_data *td = q->td;
struct delayed_work *dwork = &td->throtl_work;
if (total_nr_queued(td) > 0) {
@@ -831,11 +819,12 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
* Cancel that and schedule a new one.
*/
__cancel_delayed_work(dwork);
- queue_delayed_work(kthrotld_workqueue, dwork, delay);
+ kblockd_schedule_delayed_work(q, dwork, delay);
throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
delay, jiffies);
}
}
+EXPORT_SYMBOL(throtl_schedule_delayed_work);
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -923,7 +912,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
smp_mb__after_atomic_inc();
/* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td, 0);
+ throtl_schedule_delayed_work(td->queue, 0);
}
static void throtl_update_blkio_group_write_bps(void *key,
@@ -937,7 +926,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td, 0);
+ throtl_schedule_delayed_work(td->queue, 0);
}
static void throtl_update_blkio_group_read_iops(void *key,
@@ -951,7 +940,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td, 0);
+ throtl_schedule_delayed_work(td->queue, 0);
}
static void throtl_update_blkio_group_write_iops(void *key,
@@ -965,7 +954,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
smp_mb__before_atomic_inc();
atomic_inc(&td->limits_changed);
smp_mb__after_atomic_inc();
- throtl_schedule_delayed_work(td, 0);
+ throtl_schedule_delayed_work(td->queue, 0);
}
void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1138,10 +1127,6 @@ void blk_throtl_exit(struct request_queue *q)
static int __init throtl_init(void)
{
- kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
- if (!kthrotld_workqueue)
- panic("Failed to create kthrotld\n");
-
blkio_policy_register(&blkio_policy_throtl);
return 0;
}
diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c
index ea83a4f0c27d..501ffdf0399c 100644
--- a/trunk/block/cfq-iosched.c
+++ b/trunk/block/cfq-iosched.c
@@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
}
static inline unsigned
-cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
if (cfqd->cfq_latency) {
@@ -631,7 +631,7 @@ cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
+ unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
cfqq->slice_start = jiffies;
cfqq->slice_end = jiffies + slice;
@@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
*/
if (timed_out) {
if (cfq_cfqq_slice_new(cfqq))
- cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
+ cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
else
cfqq->slice_resid = cfqq->slice_end - jiffies;
cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
}
}
@@ -3432,10 +3432,6 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct cfq_io_context *cic = cfqd->active_cic;
- /* If the queue already has requests, don't wait */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- return false;
-
/* If there are other queues in the group, don't wait */
if (cfqq->cfqg->nr_cfqq > 1)
return false;
@@ -3731,7 +3727,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index 236e93c1f46c..2569512830d3 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT:
diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c
index cbf1112a885c..6a5b772aa201 100644
--- a/trunk/block/genhd.c
+++ b/trunk/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
- res = __invalidate_device(bdev, true);
+ res = __invalidate_device(bdev);
bdput(bdev);
}
return res;
diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c
index 1124cd297263..9049d460fa89 100644
--- a/trunk/block/ioctl.c
+++ b/trunk/block/ioctl.c
@@ -294,11 +294,9 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL)) {
- bdgrab(bdev);
- if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
- return -EBUSY;
- }
+ if (!(mode & FMODE_EXCL) &&
+ blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+ return -EBUSY;
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);
diff --git a/trunk/drivers/acpi/acpica/aclocal.h b/trunk/drivers/acpi/acpica/aclocal.h
index edc25867ad9d..54784bb42cec 100644
--- a/trunk/drivers/acpi/acpica/aclocal.h
+++ b/trunk/drivers/acpi/acpica/aclocal.h
@@ -416,15 +416,10 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */
};
-struct acpi_gpe_notify_object {
- struct acpi_namespace_node *node;
- struct acpi_gpe_notify_object *next;
-};
-
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
- struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
+ struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
};
/*
diff --git a/trunk/drivers/acpi/acpica/evgpe.c b/trunk/drivers/acpi/acpica/evgpe.c
index f4725212eb48..14988a86066f 100644
--- a/trunk/drivers/acpi/acpica/evgpe.c
+++ b/trunk/drivers/acpi/acpica/evgpe.c
@@ -457,7 +457,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
- struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@@ -509,18 +508,10 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* from this thread -- because handlers may in turn run other
* control methods.
*/
- status = acpi_ev_queue_notify_request(
- local_gpe_event_info->dispatch.device.node,
- ACPI_NOTIFY_DEVICE_WAKE);
-
- notify_object = local_gpe_event_info->dispatch.device.next;
- while (ACPI_SUCCESS(status) && notify_object) {
- status = acpi_ev_queue_notify_request(
- notify_object->node,
- ACPI_NOTIFY_DEVICE_WAKE);
- notify_object = notify_object->next;
- }
-
+ status =
+ acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
+ device_node,
+ ACPI_NOTIFY_DEVICE_WAKE);
break;
case ACPI_GPE_DISPATCH_METHOD:
diff --git a/trunk/drivers/acpi/acpica/evxfgpe.c b/trunk/drivers/acpi/acpica/evxfgpe.c
index 52aaff3df562..e9562a7cb2f9 100644
--- a/trunk/drivers/acpi/acpica/evxfgpe.c
+++ b/trunk/drivers/acpi/acpica/evxfgpe.c
@@ -198,9 +198,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node;
- struct acpi_gpe_notify_object *notify_object;
acpi_cpu_flags flags;
- u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@@ -214,62 +212,37 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Ensure that we have a valid GPE number */
-
- gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
- if (!gpe_event_info) {
- goto unlock_and_exit;
- }
-
- if (wake_device == ACPI_ROOT_OBJECT) {
- goto out;
- }
-
- /*
- * If there is no method or handler for this GPE, then the
- * wake_device will be notified whenever this GPE fires (aka
- * "implicit notify") Note: The GPE is assumed to be
- * level-triggered (for windows compatibility).
- */
- gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
- if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
- && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
- goto out;
- }
-
/* Validate wake_device is of type Device */
device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
if (device_node->type != ACPI_TYPE_DEVICE) {
- goto unlock_and_exit;
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
- gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
- ACPI_GPE_LEVEL_TRIGGERED);
- gpe_event_info->dispatch.device.node = device_node;
- gpe_event_info->dispatch.device.next = NULL;
- } else {
- /* There are multiple devices to notify implicitly. */
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
- if (!notify_object) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+ /*
+ * If there is no method or handler for this GPE, then the
+ * wake_device will be notified whenever this GPE fires (aka
+ * "implicit notify") Note: The GPE is assumed to be
+ * level-triggered (for windows compatibility).
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NONE) {
+ gpe_event_info->flags =
+ (ACPI_GPE_DISPATCH_NOTIFY |
+ ACPI_GPE_LEVEL_TRIGGERED);
+ gpe_event_info->dispatch.device_node = device_node;
}
- notify_object->node = device_node;
- notify_object->next = gpe_event_info->dispatch.device.next;
- gpe_event_info->dispatch.device.next = notify_object;
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ status = AE_OK;
}
- out:
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- status = AE_OK;
-
- unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
diff --git a/trunk/drivers/acpi/debugfs.c b/trunk/drivers/acpi/debugfs.c
index 384f7abcff77..5df67f1d6c61 100644
--- a/trunk/drivers/acpi/debugfs.c
+++ b/trunk/drivers/acpi/debugfs.c
@@ -26,9 +26,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
+ static int uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
@@ -39,24 +37,19 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
- uncopied_bytes = max_size = table.length;
- buf = kzalloc(max_size, GFP_KERNEL);
+ uncopied_bytes = table.length;
+ buf = kzalloc(uncopied_bytes, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes))
+ if (uncopied_bytes < count) {
+ kfree(buf);
return -EINVAL;
+ }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
- buf = NULL;
return -EFAULT;
}
@@ -66,7 +59,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
- buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
diff --git a/trunk/drivers/acpi/osl.c b/trunk/drivers/acpi/osl.c
index c90c76aa7f8b..b0931818cf98 100644
--- a/trunk/drivers/acpi/osl.c
+++ b/trunk/drivers/acpi/osl.c
@@ -636,21 +636,17 @@ EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
- void __iomem *virt_addr;
- unsigned int size = width / 8;
- bool unmap = false;
u32 dummy;
+ void __iomem *virt_addr;
+ int size = width / 8, unmap = 0;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
if (!virt_addr) {
- rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
- if (!virt_addr)
- return AE_BAD_ADDRESS;
- unmap = true;
+ unmap = 1;
}
-
if (!value)
value = &dummy;
@@ -670,8 +666,6 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
if (unmap)
iounmap(virt_addr);
- else
- rcu_read_unlock();
return AE_OK;
}
@@ -680,17 +674,14 @@ acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
void __iomem *virt_addr;
- unsigned int size = width / 8;
- bool unmap = false;
+ int size = width / 8, unmap = 0;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ rcu_read_unlock();
if (!virt_addr) {
- rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
- if (!virt_addr)
- return AE_BAD_ADDRESS;
- unmap = true;
+ unmap = 1;
}
switch (width) {
@@ -709,8 +700,6 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
if (unmap)
iounmap(virt_addr);
- else
- rcu_read_unlock();
return AE_OK;
}
diff --git a/trunk/drivers/acpi/video_detect.c b/trunk/drivers/acpi/video_detect.c
index 5af3479714f6..42d3d72dae85 100644
--- a/trunk/drivers/acpi/video_detect.c
+++ b/trunk/drivers/acpi/video_detect.c
@@ -82,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device)
if (!device)
return 0;
- /* Is this device able to support video switching ? */
- if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
- ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
- video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
-
/* Is this device able to retrieve a video ROM ? */
if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
diff --git a/trunk/drivers/acpi/wakeup.c b/trunk/drivers/acpi/wakeup.c
index 7bfbe40bc43b..ed6501452507 100644
--- a/trunk/drivers/acpi/wakeup.c
+++ b/trunk/drivers/acpi/wakeup.c
@@ -86,12 +86,8 @@ int __init acpi_wakeup_device_init(void)
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
- if (device_can_wakeup(&dev->dev)) {
- /* Button GPEs are supposed to be always enabled. */
- acpi_enable_gpe(dev->wakeup.gpe_device,
- dev->wakeup.gpe_number);
+ if (device_can_wakeup(&dev->dev))
device_set_wakeup_enable(&dev->dev, true);
- }
}
mutex_unlock(&acpi_device_lock);
return 0;
diff --git a/trunk/drivers/atm/solos-pci.c b/trunk/drivers/atm/solos-pci.c
index 25ef1a4556e6..73fb1c4f4cd4 100644
--- a/trunk/drivers/atm/solos-pci.c
+++ b/trunk/drivers/atm/solos-pci.c
@@ -866,9 +866,8 @@ static int popen(struct atm_vcc *vcc)
}
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
- if (!skb) {
- if (net_ratelimit())
- dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+ if (!skb && net_ratelimit()) {
+ dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
diff --git a/trunk/drivers/block/Makefile b/trunk/drivers/block/Makefile
index 40528ba56d1b..d7f463d6312d 100644
--- a/trunk/drivers/block/Makefile
+++ b/trunk/drivers/block/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
-swim_mod-y := swim.o swim_asm.o
+swim_mod-objs := swim.o swim_asm.o
diff --git a/trunk/drivers/block/aoe/Makefile b/trunk/drivers/block/aoe/Makefile
index 06ea82cdf27d..e76d997183c6 100644
--- a/trunk/drivers/block/aoe/Makefile
+++ b/trunk/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_ATA_OVER_ETH) += aoe.o
-aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c
index 9279272b3732..516d5bbec2b6 100644
--- a/trunk/drivers/block/cciss.c
+++ b/trunk/drivers/block/cciss.c
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
sector_t total_size;
InquiryData_struct *inq_buff = NULL;
- for (logvol = 0; logvol <= h->highest_lun; logvol++) {
+ for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
if (!h->drv[logvol])
continue;
if (memcmp(h->drv[logvol]->LunID, drv->LunID,
diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c
index 77fc76f8aea9..b9ba04fc2b34 100644
--- a/trunk/drivers/block/floppy.c
+++ b/trunk/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev, true);
+ __invalidate_device(bdev);
}
mutex_unlock(&open_lock);
} else {
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index dbf31ec9114d..44e18c073c44 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -78,6 +78,7 @@
#include
+static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
@@ -1500,9 +1501,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo = bdev->bd_disk->private_data;
+ mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_mutex);
return 0;
}
@@ -1512,6 +1515,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
int err;
+ mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt)
@@ -1536,6 +1540,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
+ mutex_unlock(&loop_mutex);
return 0;
}
@@ -1636,9 +1641,6 @@ static struct loop_device *loop_alloc(int i)
static void loop_free(struct loop_device *lo)
{
- if (!lo->lo_queue->queue_lock)
- lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
-
blk_cleanup_queue(lo->lo_queue);
put_disk(lo->lo_disk);
list_del(&lo->lo_list);
diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c
index e6fc716aca45..a32fb41246f8 100644
--- a/trunk/drivers/block/nbd.c
+++ b/trunk/drivers/block/nbd.c
@@ -53,6 +53,7 @@
#define DBG_BLKDEV 0x0100
#define DBG_RX 0x0200
#define DBG_TX 0x0400
+static DEFINE_MUTEX(nbd_mutex);
static unsigned int debugflags;
#endif /* NDEBUG */
@@ -717,9 +718,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+ mutex_lock(&nbd_mutex);
mutex_lock(&lo->tx_lock);
error = __nbd_ioctl(bdev, lo, cmd, arg);
mutex_unlock(&lo->tx_lock);
+ mutex_unlock(&nbd_mutex);
return error;
}
diff --git a/trunk/drivers/block/xen-blkfront.c b/trunk/drivers/block/xen-blkfront.c
index 9cb8668ff5f4..d7aa39e349a6 100644
--- a/trunk/drivers/block/xen-blkfront.c
+++ b/trunk/drivers/block/xen-blkfront.c
@@ -120,10 +120,6 @@ static DEFINE_SPINLOCK(minor_lock);
#define EXTENDED (1<<EXT_SHIFT)
	info->shadow[id].request = req;
ring_req->id = id;
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -321,7 +317,7 @@ static int blkif_queue_request(struct request *req)
rq_data_dir(req) );
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->u.rw.seg[i] =
+ ring_req->seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -438,65 +434,6 @@ static void xlvbd_flush(struct blkfront_info *info)
info->feature_flush ? "enabled" : "disabled");
}
-static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
-{
- int major;
- major = BLKIF_MAJOR(vdevice);
- *minor = BLKIF_MINOR(vdevice);
- switch (major) {
- case XEN_IDE0_MAJOR:
- *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
- *minor = ((*minor / 64) * PARTS_PER_DISK) +
- EMULATED_HD_DISK_MINOR_OFFSET;
- break;
- case XEN_IDE1_MAJOR:
- *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
- *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
- EMULATED_HD_DISK_MINOR_OFFSET;
- break;
- case XEN_SCSI_DISK0_MAJOR:
- *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
- *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
- break;
- case XEN_SCSI_DISK1_MAJOR:
- case XEN_SCSI_DISK2_MAJOR:
- case XEN_SCSI_DISK3_MAJOR:
- case XEN_SCSI_DISK4_MAJOR:
- case XEN_SCSI_DISK5_MAJOR:
- case XEN_SCSI_DISK6_MAJOR:
- case XEN_SCSI_DISK7_MAJOR:
- *offset = (*minor / PARTS_PER_DISK) +
- ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
- EMULATED_SD_DISK_NAME_OFFSET;
- *minor = *minor +
- ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
- EMULATED_SD_DISK_MINOR_OFFSET;
- break;
- case XEN_SCSI_DISK8_MAJOR:
- case XEN_SCSI_DISK9_MAJOR:
- case XEN_SCSI_DISK10_MAJOR:
- case XEN_SCSI_DISK11_MAJOR:
- case XEN_SCSI_DISK12_MAJOR:
- case XEN_SCSI_DISK13_MAJOR:
- case XEN_SCSI_DISK14_MAJOR:
- case XEN_SCSI_DISK15_MAJOR:
- *offset = (*minor / PARTS_PER_DISK) +
- ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
- EMULATED_SD_DISK_NAME_OFFSET;
- *minor = *minor +
- ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
- EMULATED_SD_DISK_MINOR_OFFSET;
- break;
- case XENVBD_MAJOR:
- *offset = *minor / PARTS_PER_DISK;
- break;
- default:
- printk(KERN_WARNING "blkfront: your disk configuration is "
- "incorrect, please use an xvd device instead\n");
- return -ENODEV;
- }
- return 0;
-}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
@@ -504,7 +441,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
{
struct gendisk *gd;
int nr_minors = 1;
- int err;
+ int err = -ENODEV;
unsigned int offset;
int minor;
int nr_parts;
@@ -519,20 +456,12 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
}
if (!VDEV_IS_EXTENDED(info->vdevice)) {
- err = xen_translate_vdev(info->vdevice, &minor, &offset);
- if (err)
- return err;
- nr_parts = PARTS_PER_DISK;
+ minor = BLKIF_MINOR(info->vdevice);
+ nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
- offset = minor / nr_parts;
- if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
- printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
- "emulated IDE disks,\n\t choose an xvd device name"
- "from xvde on\n", info->vdevice);
}
- err = -ENODEV;
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -546,6 +475,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
+ offset = minor / nr_parts;
+
if (nr_minors > 1) {
if (offset < 26)
sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
@@ -684,7 +615,7 @@ static void blkif_completion(struct blk_shadow *s)
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
- gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1001,7 +932,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Rewrite any grant references invalidated by susp/resume. */
for (j = 0; j < req->nr_segments; j++)
gnttab_grant_foreign_access_ref(
- req->u.rw.seg[j].gref,
+ req->seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
rq_data_dir(info->shadow[req->id].request));
diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c
index 6dcd55a74c0a..a126e614601f 100644
--- a/trunk/drivers/bluetooth/ath3k.c
+++ b/trunk/drivers/bluetooth/ath3k.c
@@ -39,11 +39,6 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
- /* Atheros AR9285 Malbec with sflash firmware */
- { USB_DEVICE(0x03F0, 0x311D) },
-
- /* Atheros AR5BBU12 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE02C) },
{ } /* Terminating entry */
};
diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c
index 700a3840fddc..1da773f899a2 100644
--- a/trunk/drivers/bluetooth/btusb.c
+++ b/trunk/drivers/bluetooth/btusb.c
@@ -102,12 +102,6 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
- /* Atheros AR9285 Malbec with sflash firmware */
- { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
-
- /* Atheros AR5BBU12 with sflash firmware */
- { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
-
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -832,7 +826,7 @@ static void btusb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
+ err = usb_autopm_get_interface(data->isoc);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -861,7 +855,7 @@ static void btusb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
+ usb_autopm_put_interface(data->isoc);
}
}
@@ -1044,6 +1038,8 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
+ usb_enable_autosuspend(interface_to_usbdev(intf));
+
return 0;
}
diff --git a/trunk/drivers/cdrom/cdrom.c b/trunk/drivers/cdrom/cdrom.c
index e2c48a7eccff..14033a36bcd0 100644
--- a/trunk/drivers/cdrom/cdrom.c
+++ b/trunk/drivers/cdrom/cdrom.c
@@ -409,8 +409,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
}
ENSURE(drive_status, CDC_DRIVE_STATUS );
- if (cdo->check_events == NULL && cdo->media_changed == NULL)
- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
+ ENSURE(media_changed, CDC_MEDIA_CHANGED);
ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(lock_door, CDC_LOCK);
ENSURE(select_speed, CDC_SELECT_SPEED);
diff --git a/trunk/drivers/char/Makefile b/trunk/drivers/char/Makefile
index 8238f89f73c9..5bc765d4c3ca 100644
--- a/trunk/drivers/char/Makefile
+++ b/trunk/drivers/char/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
-obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
diff --git a/trunk/drivers/char/agp/amd64-agp.c b/trunk/drivers/char/agp/amd64-agp.c
index 780498d76581..9252e85706ef 100644
--- a/trunk/drivers/char/agp/amd64-agp.c
+++ b/trunk/drivers/char/agp/amd64-agp.c
@@ -773,23 +773,18 @@ int __init agp_amd64_init(void)
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
- pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids)) {
- pci_unregister_driver(&agp_amd64_pci_driver);
+ if (!pci_dev_present(amd_nb_misc_ids))
return -ENODEV;
- }
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0) {
- pci_unregister_driver(&agp_amd64_pci_driver);
+ if (err == 0 && agp_bridges_found == 0)
err = -ENODEV;
- }
}
return err;
}
diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h
index 5feebe2800e9..c195bfeade11 100644
--- a/trunk/drivers/char/agp/intel-agp.h
+++ b/trunk/drivers/char/agp/intel-agp.h
@@ -130,7 +130,6 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
-#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c
index 0d09b537bb9a..fab3d3265adb 100644
--- a/trunk/drivers/char/agp/intel-gtt.c
+++ b/trunk/drivers/char/agp/intel-gtt.c
@@ -21,7 +21,6 @@
#include
#include
#include
-#include
#include
#include "agp.h"
#include "intel-agp.h"
@@ -71,8 +70,12 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- void __iomem *i9xx_flush_page;
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
char *i81x_gtt_table;
+ struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -719,6 +722,28 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
+ if (intel_private.i8xx_flush_page) {
+ kunmap(intel_private.i8xx_flush_page);
+ intel_private.i8xx_flush_page = NULL;
+ }
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL);
+ if (!intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -733,27 +758,14 @@ static void i830_cleanup(void)
*/
static void i830_chipset_flush(void)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- /* Forcibly evict everything from the CPU write buffers.
- * clflush appears to be insufficient.
- */
- wbinvd_on_all_cpus();
-
- /* Now we've only seen documents for this magic bit on 855GM,
- * we hope it exists for the other gen2 chipsets...
- *
- * Also works as advertised on my 845G.
- */
- writel(readl(intel_private.registers+I830_HIC) | (1<<31),
- intel_private.registers+I830_HIC);
+ unsigned int *pg = intel_private.i8xx_flush_page;
- while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
- if (time_after(jiffies, timeout))
- break;
+ memset(pg, 0, 1024);
- udelay(50);
- }
+ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -837,6 +849,8 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_i830_setup_flush();
+
return 0;
}
diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c
index 62787e30d508..b6ae6e9a9c5f 100644
--- a/trunk/drivers/char/ipmi/ipmi_si_intf.c
+++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c
@@ -320,7 +320,6 @@ static int unload_when_empty = 1;
static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
-static void cleanup_ipmi_si(void);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
@@ -900,14 +899,6 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
- /*
- * last_timeout_jiffies is updated here to avoid
- * smi_timeout() handler passing very large time_diff
- * value to smi_event_handler() that causes
- * the send command to abort.
- */
- smi_info->last_timeout_jiffies = jiffies;
-
mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
@@ -3459,7 +3450,16 @@ static int __devinit init_ipmi_si(void)
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
- cleanup_ipmi_si();
+#ifdef CONFIG_PCI
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+#endif
+
+#ifdef CONFIG_PPC_OF
+ if (of_registered)
+ of_unregister_platform_driver(&ipmi_of_platform_driver);
+#endif
+ driver_unregister(&ipmi_driver.driver);
printk(KERN_WARNING PFX
"Unable to find any System Interface(s)\n");
return -ENODEV;
diff --git a/trunk/drivers/char/mmtimer.c b/trunk/drivers/char/mmtimer.c
index 33dc2298af73..e6d75627c6c8 100644
--- a/trunk/drivers/char/mmtimer.c
+++ b/trunk/drivers/char/mmtimer.c
@@ -53,8 +53,6 @@ MODULE_LICENSE("GPL");
#define RTC_BITS 55 /* 55 bits for this implementation */
-static struct k_clock sgi_clock;
-
extern unsigned long sn_rtc_cycles_per_second;
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
@@ -489,7 +487,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
return 0;
};
-static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
+static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
{
u64 nsec;
@@ -765,21 +763,15 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
return err;
}
-static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
-{
- tp->tv_sec = 0;
- tp->tv_nsec = sgi_clock_period;
- return 0;
-}
-
static struct k_clock sgi_clock = {
- .clock_set = sgi_clock_set,
- .clock_get = sgi_clock_get,
- .clock_getres = sgi_clock_getres,
- .timer_create = sgi_timer_create,
- .timer_set = sgi_timer_set,
- .timer_del = sgi_timer_del,
- .timer_get = sgi_timer_get
+ .res = 0,
+ .clock_set = sgi_clock_set,
+ .clock_get = sgi_clock_get,
+ .timer_create = sgi_timer_create,
+ .nsleep = do_posix_clock_nonanosleep,
+ .timer_set = sgi_timer_set,
+ .timer_del = sgi_timer_del,
+ .timer_get = sgi_timer_get
};
/**
@@ -839,8 +831,8 @@ static int __init mmtimer_init(void)
(unsigned long) node);
}
- sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
- posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
+ sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
+ register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);
printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
sn_rtc_cycles_per_second/(unsigned long)1E6);
diff --git a/trunk/drivers/char/pcmcia/cm4000_cs.c b/trunk/drivers/char/pcmcia/cm4000_cs.c
index bcbbc71febb7..777181a2e603 100644
--- a/trunk/drivers/char/pcmcia/cm4000_cs.c
+++ b/trunk/drivers/char/pcmcia/cm4000_cs.c
@@ -830,7 +830,8 @@ static void monitor_card(unsigned long p)
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = (0x01 << dev->proto);
+ ptsreq.protocol = ptsreq.protocol =
+ (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
diff --git a/trunk/drivers/char/pcmcia/ipwireless/main.c b/trunk/drivers/char/pcmcia/ipwireless/main.c
index 444155a305ae..94b8eb4d691d 100644
--- a/trunk/drivers/char/pcmcia/ipwireless/main.c
+++ b/trunk/drivers/char/pcmcia/ipwireless/main.c
@@ -78,6 +78,7 @@ static void signalled_reboot_callback(void *callback_data)
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
+ struct resource *io_resource;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -91,12 +92,9 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
if (ret)
return ret;
- if (!request_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]),
- IPWIRELESS_PCCARD_NAME)) {
- ret = -EBUSY;
- goto exit;
- }
+ io_resource = request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME);
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -107,25 +105,22 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
- goto exit1;
+ goto exit2;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
- ipw->common_memory = ioremap(p_dev->resource[2]->start,
+ ipw->attr_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
- if (!request_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]),
- IPWIRELESS_PCCARD_NAME)) {
- ret = -EBUSY;
- goto exit2;
- }
+ request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME);
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
- goto exit3;
+ goto exit2;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
@@ -133,28 +128,23 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
- if (!request_mem_region(p_dev->resource[3]->start,
- resource_size(p_dev->resource[3]),
- IPWIRELESS_PCCARD_NAME)) {
- ret = -EBUSY;
- goto exit4;
- }
+ request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME);
return 0;
-exit4:
- iounmap(ipw->attr_memory);
exit3:
- release_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]));
exit2:
- iounmap(ipw->common_memory);
+ if (ipw->common_memory) {
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
+ iounmap(ipw->common_memory);
+ }
exit1:
- release_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]));
-exit:
+ release_resource(io_resource);
pcmcia_disable_device(p_dev);
- return ret;
+ return -1;
}
static int config_ipwireless(struct ipw_dev *ipw)
@@ -229,8 +219,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
static void release_ipwireless(struct ipw_dev *ipw)
{
- release_region(ipw->link->resource[0]->start,
- resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
diff --git a/trunk/drivers/char/tpm/tpm.c b/trunk/drivers/char/tpm/tpm.c
index 1f46f1cd9225..36e0fa161c2b 100644
--- a/trunk/drivers/char/tpm/tpm.c
+++ b/trunk/drivers/char/tpm/tpm.c
@@ -364,12 +364,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED)
+ if (duration_idx != TPM_UNDEFINED) {
duration = chip->vendor.duration[duration_idx];
- if (duration <= 0)
+ /* if duration is 0, it's because chip->vendor.duration wasn't */
+ /* filled yet, so we set the lowest timeout just to give enough */
+ /* time for tpm_get_timeouts() to succeed */
+ return (duration <= 0 ? HZ : duration);
+ } else
return 2 * 60 * HZ;
- else
- return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..1109f6848a43 100644
--- a/trunk/drivers/cpufreq/cpufreq.c
+++ b/trunk/drivers/cpufreq/cpufreq.c
@@ -1919,10 +1919,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
- if (ret)
- goto err_null_driver;
- if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1937,22 +1935,21 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
dprintk("no CPU initialized for driver %s\n",
driver_data->name);
- goto err_sysdev_unreg;
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
+ if (!ret) {
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ dprintk("driver %s up and running\n", driver_data->name);
+ cpufreq_debug_enable_ratelimit();
+ }
- return 0;
-err_sysdev_unreg:
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-err_null_driver:
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c
index 07bca4970e50..297f48b0cba9 100644
--- a/trunk/drivers/dma/amba-pl08x.c
+++ b/trunk/drivers/dma/amba-pl08x.c
@@ -79,7 +79,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -236,19 +235,16 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
}
/*
- * Pause the channel by setting the HALT bit.
+ * Overall DMAC remains enabled always.
*
- * For M->P transfers, pause the DMAC first and then stop the peripheral -
- * the FIFO can only drain if the peripheral is still requesting data.
- * (note: this can still timeout if the DMAC FIFO never drains of data.)
+ * Disabling individual channels could lose data.
*
- * For P->M transfers, disable the peripheral first to stop it filling
- * the DMAC FIFO, and then pause the DMAC.
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
*/
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
u32 val;
- int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
val = readl(ch->base + PL080_CH_CONFIG);
@@ -256,13 +252,8 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
writel(val, ch->base + PL080_CH_CONFIG);
/* Wait for channel inactive */
- for (timeout = 1000; timeout; timeout--) {
- if (!pl08x_phy_channel_busy(ch))
- break;
- udelay(1);
- }
- if (pl08x_phy_channel_busy(ch))
- pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
+ while (pl08x_phy_channel_busy(ch))
+ cpu_relax();
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -276,24 +267,19 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
}
-/*
- * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
- * clears any pending interrupt status. This should not be used for
- * an on-going transfer, but as a method of shutting down a channel
- * (eg, when it's no longer used) or terminating a transfer.
- */
-static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
- struct pl08x_phy_chan *ch)
+/* Stops the channel */
+static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->base + PL080_CH_CONFIG);
+ u32 val;
- val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
- PL080_CONFIG_TC_IRQ_MASK);
+ pl08x_pause_phy_chan(ch);
+ /* Disable channel */
+ val = readl(ch->base + PL080_CH_CONFIG);
+ val &= ~PL080_CONFIG_ENABLE;
+ val &= ~PL080_CONFIG_ERR_IRQ_MASK;
+ val &= ~PL080_CONFIG_TC_IRQ_MASK;
writel(val, ch->base + PL080_CH_CONFIG);
-
- writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
- writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -418,12 +404,13 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
{
unsigned long flags;
- spin_lock_irqsave(&ch->lock, flags);
-
/* Stop the channel and clear its interrupts */
- pl08x_terminate_phy_chan(pl08x, ch);
+ pl08x_stop_phy_chan(ch);
+ writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
+ writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
/* Mark it as free */
+ spin_lock_irqsave(&ch->lock, flags);
ch->serving = NULL;
spin_unlock_irqrestore(&ch->lock, flags);
}
@@ -1462,7 +1449,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
plchan->state = PL08X_CHAN_IDLE;
if (plchan->phychan) {
- pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+ pl08x_stop_phy_chan(plchan->phychan);
/*
* Mark physical channel as free and free any slave
diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c
index e18eaabe92b9..e53d438142bb 100644
--- a/trunk/drivers/dma/imx-dma.c
+++ b/trunk/drivers/dma/imx-dma.c
@@ -49,7 +49,6 @@ struct imxdma_channel {
struct imxdma_engine {
struct device *dev;
- struct device_dma_parameters dma_parms;
struct dma_device dma_device;
struct imxdma_channel channel[MAX_DMA_CHANNELS];
};
@@ -243,21 +242,6 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
else
dmamode = DMA_MODE_WRITE;
- switch (imxdmac->word_size) {
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- if (sgl->length & 3 || sgl->dma_address & 3)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- if (sgl->length & 1 || sgl->dma_address & 1)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- break;
- default:
- return NULL;
- }
-
ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
dma_length, imxdmac->per_address, dmamode);
if (ret)
@@ -345,9 +329,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&imxdma->dma_device.channels);
- dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
- dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
/* Initialize channel parameters */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -365,7 +346,11 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdmac->imxdma = imxdma;
spin_lock_init(&imxdmac->lock);
+ dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
imxdmac->chan.device = &imxdma->dma_device;
+ imxdmac->chan.chan_id = i;
imxdmac->channel = i;
/* Add the channel to the DMAC list */
@@ -385,9 +370,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxdma);
- imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
- dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
-
ret = dma_async_device_register(&imxdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
diff --git a/trunk/drivers/dma/imx-sdma.c b/trunk/drivers/dma/imx-sdma.c
index b6d1455fa936..d5a5d4d9c19b 100644
--- a/trunk/drivers/dma/imx-sdma.c
+++ b/trunk/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
* struct sdma_channel - housekeeping for a SDMA channel
*
* @sdma pointer to the SDMA engine for this channel
- * @channel the channel number, matches dmaengine chan_id + 1
+ * @channel the channel number, matches dmaengine chan_id
* @direction transfer type. Needed for setting SDMA script
* @peripheral_type Peripheral type. Needed for setting SDMA script
* @event_id0 aka dma request line
@@ -301,7 +301,6 @@ struct sdma_firmware_header {
struct sdma_engine {
struct device *dev;
- struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
@@ -450,7 +449,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
if (bd->mode.status & BD_RROR)
sdmac->status = DMA_ERROR;
else
- sdmac->status = DMA_IN_PROGRESS;
+ sdmac->status = DMA_SUCCESS;
bd->mode.status |= BD_DONE;
sdmac->buf_tail++;
@@ -771,15 +770,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
}
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
{
- dma_cookie_t cookie = sdmac->chan.cookie;
+ dma_cookie_t cookie = sdma->chan.cookie;
if (++cookie < 0)
cookie = 1;
- sdmac->chan.cookie = cookie;
- sdmac->desc.cookie = cookie;
+ sdma->chan.cookie = cookie;
+ sdma->desc.cookie = cookie;
return cookie;
}
@@ -799,7 +798,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = sdma_assign_cookie(sdmac);
- sdma_enable_channel(sdma, sdmac->channel);
+ sdma_enable_channel(sdma, tx->chan->chan_id);
spin_unlock_irq(&sdmac->lock);
@@ -812,6 +811,10 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
struct imx_dma_data *data = chan->private;
int prio, ret;
+ /* No need to execute this for internal channel 0 */
+ if (chan->chan_id == 0)
+ return 0;
+
if (!data)
return -EINVAL;
@@ -876,7 +879,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int ret, i, count;
- int channel = sdmac->channel;
+ int channel = chan->chan_id;
struct scatterlist *sg;
if (sdmac->status == DMA_IN_PROGRESS)
@@ -921,33 +924,22 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
ret = -EINVAL;
goto err_out;
}
-
- switch (sdmac->word_size) {
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
bd->mode.command = 0;
- if (count & 3 || sg->dma_address & 3)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- bd->mode.command = 2;
- if (count & 1 || sg->dma_address & 1)
- return NULL;
- break;
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- bd->mode.command = 1;
- break;
- default:
- return NULL;
- }
+ else
+ bd->mode.command = sdmac->word_size;
param = BD_DONE | BD_EXTD | BD_CONT;
- if (i + 1 == sg_len) {
+ if (sdmac->flags & IMX_DMA_SG_LOOP) {
param |= BD_INTR;
- param |= BD_LAST;
- param &= ~BD_CONT;
+ if (i + 1 == sg_len)
+ param |= BD_WRAP;
}
+ if (i + 1 == sg_len)
+ param |= BD_INTR;
+
dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
i, count, sg->dma_address,
param & BD_WRAP ? "wrap" : "",
@@ -961,7 +953,6 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
return &sdmac->desc;
err_out:
- sdmac->status = DMA_ERROR;
return NULL;
}
@@ -972,7 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
int num_periods = buf_len / period_len;
- int channel = sdmac->channel;
+ int channel = chan->chan_id;
int ret, i = 0, buf = 0;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1075,12 +1066,14 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
dma_cookie_t last_used;
+ enum dma_status ret;
last_used = chan->cookie;
+ ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
- return sdmac->status;
+ return ret;
}
static void sdma_issue_pending(struct dma_chan *chan)
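For reference (not part of this patch), the status returned by the reworked sdma_tx_status() above comes purely from cookie ordering. The helper below is an illustrative sketch equivalent to the dma_async_is_complete() test it calls, assuming cookies are positive and wrap back to 1; the function name is hypothetical.

/*
 * Illustrative sketch -- equivalent to the cookie comparison performed by
 * dma_async_is_complete() in <linux/dmaengine.h>. Cookies are positive and
 * wrap from INT_MAX back to 1, hence the two-sided checks.
 */
static enum dma_status example_cookie_status(dma_cookie_t cookie,
					     dma_cookie_t last_complete,
					     dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if (cookie <= last_complete || cookie > last_used)
			return DMA_SUCCESS;
	} else {
		if (cookie <= last_complete && cookie > last_used)
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}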
@@ -1142,7 +1135,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
- addr->ram_code_start_addr);
+ sdma->script_addrs->ram_code_start_addr);
clk_disable(sdma->clk);
sdma_add_scripts(sdma, addr);
@@ -1244,6 +1237,7 @@ static int __init sdma_probe(struct platform_device *pdev)
struct resource *iores;
struct sdma_platform_data *pdata = pdev->dev.platform_data;
int i;
+ dma_cap_mask_t mask;
struct sdma_engine *sdma;
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1286,9 +1280,6 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma->version = pdata->sdma_version;
- dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
- dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1297,17 +1288,15 @@ static int __init sdma_probe(struct platform_device *pdev)
sdmac->sdma = sdma;
spin_lock_init(&sdmac->lock);
+ dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
sdmac->chan.device = &sdma->dma_device;
+ sdmac->chan.chan_id = i;
sdmac->channel = i;
- /*
- * Add the channel to the DMAC list. Do not add channel 0 though
- * because we need it internally in the SDMA driver. This also means
- * that channel 0 in dmaengine counting matches sdma channel 1.
- */
- if (i)
- list_add_tail(&sdmac->chan.device_node,
- &sdma->dma_device.channels);
+ /* Add the channel to the DMAC list */
+ list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
}
ret = sdma_init(sdma);
@@ -1328,8 +1317,6 @@ static int __init sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_control = sdma_control;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
- sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
- dma_set_max_seg_size(sdma->dma_device.dev, 65535);
ret = dma_async_device_register(&sdma->dma_device);
if (ret) {
@@ -1337,6 +1324,13 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_init;
}
+ /* request channel 0. This is an internal control channel
+ * to the SDMA engine and not available to clients.
+ */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_request_channel(mask, NULL, NULL);
+
dev_info(sdma->dev, "initialized\n");
return 0;
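For context (not part of this patch), the internal dma_request_channel(mask, NULL, NULL) call above claims channel 0 for the driver itself, so clients only ever receive the remaining channels through the normal dmaengine request path. The sketch below shows that client-side counterpart; the filter callback and its parameter are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch. A DMA client asks
 * dmaengine for a slave channel; channel 0 is already held by the driver,
 * so only the remaining channels can be handed out.
 */
#include <linux/dmaengine.h>

static bool example_sdma_filter(struct dma_chan *chan, void *param)
{
	/* A real filter would match board-specific request data against
	 * chan->private or chan->chan_id; accept any channel here. */
	return true;
}

static struct dma_chan *example_request_channel(void *board_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* Returns NULL if no free channel matches the mask and filter. */
	return dma_request_channel(mask, example_sdma_filter, board_data);
}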
@@ -1354,7 +1348,7 @@ static int __init sdma_probe(struct platform_device *pdev)
err_request_region:
err_irq:
kfree(sdma);
- return ret;
+ return 0;
}
static int __exit sdma_remove(struct platform_device *pdev)
diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c
index c1a125e7d1df..cb26ee9773d6 100644
--- a/trunk/drivers/dma/ipu/ipu_idmac.c
+++ b/trunk/drivers/dma/ipu/ipu_idmac.c
@@ -1145,6 +1145,29 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
+ /*
+	 * Problem (observed with channel DMAIC_7): after enabling the channel
+	 * and initialising the buffers, an interrupt arrives with "current"
+	 * still pointing at buffer 0, whereas the channel should use buffer 0
+	 * first and only raise an interrupt once it is done, by which time
+	 * "current" should already point to buffer 1. This spurious interrupt
+	 * also occurs on channel DMASDC_0. With DMAIC_7, if we simply leave
+	 * the ISR after the first interrupt, a second one normally follows,
+	 * with "current" correctly pointing to buffer 1 this time. But
+	 * sometimes that second interrupt never comes and the channel hangs.
+	 * Clearing BUFx_RDY when disabling the channel seems to keep it from
+	 * hanging, but does not prevent the spurious interrupt. It might also
+	 * be unsafe: consider the IDMAC controller trying to switch to a
+	 * buffer just as we clear the ready bit and re-enable it a moment later.
+ */
+ reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
+ idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
+ idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
+
+ reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
+ idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
+ idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
+
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
@@ -1223,6 +1246,33 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
+ if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
+ ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
+ !list_is_last(ichan->queue.next, &ichan->queue))) {
+ int i = 100;
+
+ /* This doesn't help. See comment in ipu_disable_channel() */
+ while (--i) {
+ curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+ if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
+ break;
+ cpu_relax();
+ }
+
+ if (!i) {
+ spin_unlock(&ichan->lock);
+ dev_dbg(dev,
+ "IRQ on active buffer on channel %x, active "
+ "%d, ready %x, %x, current %x!\n", chan_id,
+ ichan->active_buffer, ready0, ready1, curbuf);
+ return IRQ_NONE;
+ } else
+ dev_dbg(dev,
+ "Buffer deactivated on channel %x, active "
+ "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+ ichan->active_buffer, ready0, ready1, curbuf, i);
+ }
+
if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
(!ichan->active_buffer && (ready0 >> chan_id) & 1)
)) {
diff --git a/trunk/drivers/edac/amd64_edac.c b/trunk/drivers/edac/amd64_edac.c
index 23e03554f0d3..4a5ecc58025d 100644
--- a/trunk/drivers/edac/amd64_edac.c
+++ b/trunk/drivers/edac/amd64_edac.c
@@ -826,6 +826,8 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
+ int ganged;
+
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
@@ -849,19 +851,28 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1(" DramHoleValid: %s\n",
(pvt->dhar & DHAR_VALID) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(0, pvt);
-
/* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf)
+ if (boot_cpu_data.x86 == 0xf) {
+ amd64_debug_display_dimm_sizes(0, pvt);
return;
-
- amd64_debug_display_dimm_sizes(1, pvt);
+ }
amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
+
+ /*
+	 * Determine whether the DCTs are ganged, dump the memory sizes for the
+	 * first controller, and, if NOT ganged, also dump the second controller.
+ */
+ ganged = dct_ganging_enabled(pvt);
+
+ amd64_debug_display_dimm_sizes(0, pvt);
+
+ if (!ganged)
+ amd64_debug_display_dimm_sizes(1, pvt);
}
/* Read in both of DBAM registers */
@@ -1633,10 +1644,11 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
WARN_ON(ctrl != 0);
}
- dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+ debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
+ dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+ dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
diff --git a/trunk/drivers/firmware/dmi_scan.c b/trunk/drivers/firmware/dmi_scan.c
index bcb1126e3d00..e28e41668177 100644
--- a/trunk/drivers/firmware/dmi_scan.c
+++ b/trunk/drivers/firmware/dmi_scan.c
@@ -378,17 +378,10 @@ static void __init print_filtered(const char *info)
static void __init dmi_dump_ids(void)
{
- const char *board; /* Board Name is optional */
-
printk(KERN_DEBUG "DMI: ");
- print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
- printk(KERN_CONT " ");
+ print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
+ printk(KERN_CONT "/");
print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
- board = dmi_get_system_info(DMI_BOARD_NAME);
- if (board) {
- printk(KERN_CONT "/");
- print_filtered(board);
- }
printk(KERN_CONT ", BIOS ");
print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
printk(KERN_CONT " ");
diff --git a/trunk/drivers/gpio/ml_ioh_gpio.c b/trunk/drivers/gpio/ml_ioh_gpio.c
index 7f6f01a4b145..cead8e6ff345 100644
--- a/trunk/drivers/gpio/ml_ioh_gpio.c
+++ b/trunk/drivers/gpio/ml_ioh_gpio.c
@@ -326,7 +326,6 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
static struct pci_driver ioh_gpio_driver = {
.name = "ml_ioh_gpio",
diff --git a/trunk/drivers/gpio/pca953x.c b/trunk/drivers/gpio/pca953x.c
index b473429eee75..a261972f603d 100644
--- a/trunk/drivers/gpio/pca953x.c
+++ b/trunk/drivers/gpio/pca953x.c
@@ -60,7 +60,6 @@ struct pca953x_chip {
unsigned gpio_start;
uint16_t reg_output;
uint16_t reg_direction;
- struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
@@ -120,17 +119,13 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
if (ret)
- goto exit;
+ return ret;
chip->reg_direction = reg_val;
- ret = 0;
-exit:
- mutex_unlock(&chip->i2c_lock);
- return ret;
+ return 0;
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -142,7 +137,6 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- mutex_lock(&chip->i2c_lock);
/* set output level */
if (val)
reg_val = chip->reg_output | (1u << off);
@@ -151,7 +145,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
if (ret)
- goto exit;
+ return ret;
chip->reg_output = reg_val;
@@ -159,13 +153,10 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
reg_val = chip->reg_direction & ~(1u << off);
ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
if (ret)
- goto exit;
+ return ret;
chip->reg_direction = reg_val;
- ret = 0;
-exit:
- mutex_unlock(&chip->i2c_lock);
- return ret;
+ return 0;
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -176,9 +167,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- mutex_lock(&chip->i2c_lock);
	ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
- mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
* do unless gpio_*_value_cansleep() calls become different
@@ -198,7 +187,6 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
chip = container_of(gc, struct pca953x_chip, gpio_chip);
- mutex_lock(&chip->i2c_lock);
if (val)
reg_val = chip->reg_output | (1u << off);
else
@@ -206,11 +194,9 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
if (ret)
- goto exit;
+ return;
chip->reg_output = reg_val;
-exit:
- mutex_unlock(&chip->i2c_lock);
}
static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -531,8 +517,6 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->names = pdata->names;
- mutex_init(&chip->i2c_lock);
-
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
diff --git a/trunk/drivers/gpio/pch_gpio.c b/trunk/drivers/gpio/pch_gpio.c
index 2c6af8705103..0eba0a75c804 100644
--- a/trunk/drivers/gpio/pch_gpio.c
+++ b/trunk/drivers/gpio/pch_gpio.c
@@ -286,7 +286,6 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
static struct pci_driver pch_gpio_driver = {
.name = "pch_gpio",
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index f73ef4390db6..6977a1ce9d98 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
- int i, j, rc = 0;
+ int i, rc = 0;
int start;
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp;
start = cmap->start;
- for (j = 0; j < cmap->len; j++) {
+ for (i = 0; i < cmap->len; i++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
diff --git a/trunk/drivers/gpu/drm/drm_info.c b/trunk/drivers/gpu/drm/drm_info.c
index be9a9c07d152..3cdbaf379bb5 100644
--- a/trunk/drivers/gpu/drm/drm_info.c
+++ b/trunk/drivers/gpu/drm/drm_info.c
@@ -283,18 +283,17 @@ int drm_vma_info(struct seq_file *m, void *data)
#endif
mutex_lock(&dev->struct_mutex);
- seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
+ seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
atomic_read(&dev->vma_count),
- high_memory, (void *)virt_to_phys(high_memory));
+ high_memory, (u64)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
- "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
- pt->pid,
- (void *)vma->vm_start, (void *)vma->vm_end,
+ "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+ pt->pid, vma->vm_start, vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c
index 28d1d3c24d65..3dadfa2a8528 100644
--- a/trunk/drivers/gpu/drm/drm_irq.c
+++ b/trunk/drivers/gpu/drm/drm_irq.c
@@ -164,10 +164,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+ if ((vblrc > 0) && (abs(diff_ns) > 1000000))
atomic_inc(&dev->_vblank_count[crtc]);
- smp_mb__after_atomic_inc();
- }
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
@@ -493,12 +491,6 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
-	/* Fields of interlaced scanout modes are only half a frame duration.
-	 * Double the dotclock to get half the frame-/line-/pixelduration.
- */
- if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
- dotclock *= 2;
-
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
@@ -611,6 +603,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EAGAIN;
}
+ /* Don't know yet how to handle interlaced or
+ * double scan modes. Just no-op for now.
+ */
+ if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+ return -ENOTSUPP;
+ }
+
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
@@ -858,11 +858,10 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ smp_wmb();
}
- smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
- smp_mb__after_atomic_inc();
}
/**
@@ -1012,8 +1011,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int ret = 0;
- unsigned int crtc;
+ int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1295,16 +1293,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
- if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+ smp_wmb();
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
- smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
- smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
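As background (not part of this patch), the smp_wmb() calls added above implement the usual store-then-publish ordering: fill the vblank timestamp slot first, then let readers discover it through the incremented count. A minimal sketch with hypothetical names:

/*
 * Illustrative sketch -- store-then-publish ordering for the vblank
 * timestamp ringbuffer. Names are hypothetical.
 */
static void example_publish_timestamp(struct timeval *slot,
				      const struct timeval *ts,
				      atomic_t *vblank_count)
{
	*slot = *ts;              /* 1. fill the ringbuffer slot             */
	smp_wmb();                /* 2. order the store before the publish   */
	atomic_inc(vblank_count); /* 3. readers now pick up the new slot     */
}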
diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
index 4ff9b6cc973f..3601466c5502 100644
--- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
int max_freq;
/* RPSTAT1 is in the GT power well */
- __gen6_gt_force_wake_get(dev_priv);
+ __gen6_force_wake_get(dev_priv);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * 100);
- __gen6_gt_force_wake_put(dev_priv);
+ __gen6_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c
index e33d9be7df3b..17bd766f2081 100644
--- a/trunk/drivers/gpu/drm/i915/i915_dma.c
+++ b/trunk/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,17 +1895,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
- /* 965GM sometimes incorrectly writes to hardware status page (HWS)
- * using 32bit addressing, overwriting memory if HWS is located
- * above 4GB.
- *
- * The documentation also mentions an issue with undefined
- * behaviour if any general state is accessed within a page above 4GB,
- * which also needs to be handled carefully.
- */
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
-
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c
index 22ec066adae6..cfb56d0ff367 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.c
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.c
@@ -46,12 +46,6 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
-unsigned int i915_semaphores = 0;
-module_param_named(semaphores, i915_semaphores, int, 0600);
-
-unsigned int i915_enable_rc6 = 0;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
-
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -257,7 +251,7 @@ void intel_detect_pch (struct drm_device *dev)
}
}
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -273,22 +267,12 @@ void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo < 20 && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
-}
-
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -376,7 +360,7 @@ static int i915_drm_thaw(struct drm_device *dev)
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
- if (IS_IRONLAKE_M(dev))
+ if (dev_priv->renderctx && dev_priv->pwrctx)
ironlake_enable_rc6(dev);
}
diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h
index 456f40484838..a0149c619cdd 100644
--- a/trunk/drivers/gpu/drm/i915/i915_drv.h
+++ b/trunk/drivers/gpu/drm/i915/i915_drv.h
@@ -956,10 +956,8 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
-extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1178,9 +1176,6 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
-
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1357,32 +1352,22 @@ __i915_write(64, q)
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-
-static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
- __gen6_gt_force_wake_get(dev_priv);
+ __gen6_force_wake_get(dev_priv);
val = I915_READ(reg);
- __gen6_gt_force_wake_put(dev_priv);
+ __gen6_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
-static inline void i915_gt_write(struct drm_i915_private *dev_priv,
- u32 reg, u32 val)
-{
- if (dev_priv->info->gen >= 6)
- __gen6_gt_wait_for_fifo(dev_priv);
- I915_WRITE(reg, val);
-}
-
static inline void
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c
index 36e66cc5225e..cf4f74c7c6fb 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
-uint32_t
+static uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 50ab1614571c..d2f445e825f2 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
return i915_gem_object_wait_rendering(obj, true);
idx = intel_ring_sync_index(from, to);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
index d64843e18df2..22a32b9932c5 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,27 +349,14 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
- /* Rebind if we need a change of alignment */
- if (!obj->map_and_fenceable) {
- u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(obj);
- if (obj->gtt_offset & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
- }
-
- if (ret == 0) {
- obj->tiling_changed = true;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
- }
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
}
- /* we have to maintain this existing ABI... */
- args->stride = obj->stride;
- args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c
index 8a9e08bf1cf7..97f946dcc1aa 100644
--- a/trunk/drivers/gpu/drm/i915/i915_irq.c
+++ b/trunk/drivers/gpu/drm/i915/i915_irq.c
@@ -316,8 +316,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -1651,7 +1649,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK;
+ hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
+ I915_WRITE(FDI_RXA_IMR, 0);
+ I915_WRITE(FDI_RXB_IMR, 0);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h
index 2abe240dae58..5cfc68940f17 100644
--- a/trunk/drivers/gpu/drm/i915/i915_reg.h
+++ b/trunk/drivers/gpu/drm/i915/i915_reg.h
@@ -174,9 +174,7 @@
 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -3271,8 +3269,6 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
-#define GT_FIFO_FREE_ENTRIES 0x120008
-
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index 49fb54fd9a18..7e42aa586504 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- __gen6_gt_force_wake_get(dev_priv);
+ __gen6_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- __gen6_gt_force_wake_put(dev_priv);
+ __gen6_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- (void) ret;
+ if (ret) {
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,31 +2045,6 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
-
- /*
- * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
- * must be driven by its own crtc; no sharing is possible.
- */
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
- switch (encoder->type) {
- case INTEL_OUTPUT_EDP:
- if (!intel_encoder_is_pch_edp(&encoder->base))
- return false;
- continue;
- }
- }
-
- return true;
-}
-
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2078,7 +2053,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
- bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2092,56 +2066,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- is_pch_port = intel_crtc_driving_pch(crtc);
-
- if (is_pch_port)
- ironlake_fdi_enable(crtc);
- else {
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
-
- POSTING_READ(reg);
- udelay(100);
- }
+ ironlake_fdi_enable(crtc);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2175,10 +2100,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
- /* Skip the PCH stuff if possible */
- if (!is_pch_port)
- goto done;
-
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2263,7 +2184,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-done:
+
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -5637,7 +5558,9 @@ static void intel_crtc_reset(struct drm_crtc *crtc)
/* Reset flags back to the 'unknown' status so that they
* will be correctly set on the initial modeset.
*/
+ intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = -1;
+ intel_crtc->active = true; /* force the pipe off on setup_init_config */
}
static struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -5743,7 +5666,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc_reset(&intel_crtc->base);
- intel_crtc->active = true; /* force the pipe off on setup_init_config */
if (HAS_PCH_SPLIT(dev)) {
intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6282,7 +6204,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- __gen6_gt_force_wake_get(dev_priv);
+ __gen6_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6380,7 +6302,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- __gen6_gt_force_wake_put(dev_priv);
+ __gen6_force_wake_put(dev_priv);
}
void intel_enable_clock_gating(struct drm_device *dev)
@@ -6541,60 +6463,52 @@ void intel_enable_clock_gating(struct drm_device *dev)
}
}
-static void ironlake_teardown_rc6(struct drm_device *dev)
+void intel_disable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+ I915_WRITE(CCID, 0);
+ POSTING_READ(CCID);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
dev_priv->renderctx = NULL;
}
if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->pwrctx = NULL;
}
-
- ironlake_teardown_rc6(dev);
}
-static int ironlake_setup_rc6(struct drm_device *dev)
+static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- return -ENOMEM;
-
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 10);
+ POSTING_READ(CCID);
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
}
void ironlake_enable_rc6(struct drm_device *dev)
@@ -6602,26 +6516,15 @@ void ironlake_enable_rc6(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!i915_enable_rc6)
- return;
-
- ret = ironlake_setup_rc6(dev);
- if (ret)
- return;
-
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = BEGIN_LP_RING(6);
if (ret) {
- ironlake_teardown_rc6(dev);
+ ironlake_disable_rc6(dev);
return;
}
-
OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6638,7 +6541,6 @@ void ironlake_enable_rc6(struct drm_device *dev)
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
-
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -6881,9 +6783,21 @@ void intel_modeset_init(struct drm_device *dev)
if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
- if (IS_IRONLAKE_M(dev))
+ if (IS_IRONLAKE_M(dev)) {
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ goto skip_rc6;
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ goto skip_rc6;
+ }
ironlake_enable_rc6(dev);
+ }
+skip_rc6:
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c
index 51cb4e36997f..1f4242b682c8 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dp.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,24 +1639,6 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
-static bool
-intel_dp_detect_audio(struct drm_connector *connector)
-{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct edid *edid;
- bool has_audio = false;
-
- edid = drm_get_edid(connector, &intel_dp->adapter);
- if (edid) {
- has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- }
-
- return has_audio;
-}
-
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1670,23 +1652,17 @@ intel_dp_set_property(struct drm_connector *connector,
return ret;
if (property == intel_dp->force_audio_property) {
- int i = val;
- bool has_audio;
-
- if (i == intel_dp->force_audio)
+ if (val == intel_dp->force_audio)
return 0;
- intel_dp->force_audio = i;
+ intel_dp->force_audio = val;
- if (i == 0)
- has_audio = intel_dp_detect_audio(connector);
- else
- has_audio = i > 0;
-
- if (has_audio == intel_dp->has_audio)
+ if (val > 0 && intel_dp->has_audio)
+ return 0;
+ if (val < 0 && !intel_dp->has_audio)
return 0;
- intel_dp->has_audio = has_audio;
+ intel_dp->has_audio = val > 0;
goto done;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h
index 2c431049963c..74db2557d644 100644
--- a/trunk/drivers/gpu/drm/i915/intel_drv.h
+++ b/trunk/drivers/gpu/drm/i915/intel_drv.h
@@ -298,6 +298,7 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
index c635c9e357b9..0d0273e7b029 100644
--- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,27 +251,6 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
}
-static bool
-intel_hdmi_detect_audio(struct drm_connector *connector)
-{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct edid *edid;
- bool has_audio = false;
-
- edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
- if (edid) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- has_audio = drm_detect_monitor_audio(edid);
-
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- }
-
- return has_audio;
-}
-
static int
intel_hdmi_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -285,23 +264,17 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == intel_hdmi->force_audio_property) {
- int i = val;
- bool has_audio;
-
- if (i == intel_hdmi->force_audio)
+ if (val == intel_hdmi->force_audio)
return 0;
- intel_hdmi->force_audio = i;
-
- if (i == 0)
- has_audio = intel_hdmi_detect_audio(connector);
- else
- has_audio = i > 0;
+ intel_hdmi->force_audio = val;
- if (has_audio == intel_hdmi->has_audio)
+ if (val > 0 && intel_hdmi->has_audio)
+ return 0;
+ if (val < 0 && !intel_hdmi->has_audio)
return 0;
- intel_hdmi->has_audio = has_audio;
+ intel_hdmi->has_audio = val > 0;
goto done;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c
index bcdba7bd5cfa..ace8d5d30dd2 100644
--- a/trunk/drivers/gpu/drm/i915/intel_lvds.c
+++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,6 +261,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return true;
}
+ /* Make sure pre-965s set dither correctly */
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
adjusted_mode->vdisplay == mode->vdisplay)
@@ -368,16 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- /* If not enabling scaling, be consistent and always use 0. */
if ((pfit_control & PFIT_ENABLE) == 0) {
pfit_control = 0;
pfit_pgm_ratios = 0;
}
-
- /* Make sure pre-965 set dither correctly */
- if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c
index f8f86e57df22..c65992df458d 100644
--- a/trunk/drivers/gpu/drm/i915/intel_panel.c
+++ b/trunk/drivers/gpu/drm/i915/intel_panel.c
@@ -208,6 +208,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
+ val >>= 1;
}
}
@@ -234,11 +235,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (is_backlight_combination_mode(dev)){
u32 max = intel_panel_get_max_backlight(dev);
- u8 lbpc;
+ u8 lpbc;
- lbpc = level * 0xfe / max + 1;
- level /= lbpc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+ lpbc = level * 0xfe / max + 1;
+ level /= lpbc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
}
tmp = I915_READ(BLC_PWM_CTL);
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
index 445f27efe677..6218fa97aa1e 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1059,25 +1059,22 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate, u32 flush)
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
return 0;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- cmd = MI_FLUSH_DW;
- if (invalidate & I915_GEM_GPU_DOMAINS)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
@@ -1233,25 +1230,22 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
}
static int blt_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate, u32 flush)
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- uint32_t cmd;
int ret;
- if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
return 0;
ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
- cmd = MI_FLUSH_DW;
- if (invalidate & I915_GEM_DOMAIN_RENDER)
- cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
index 34306865a5df..6d6fde85a636 100644
--- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,23 +14,22 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
-#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
-#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
index 7c50cdce84f0..6a09c1413d60 100644
--- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,7 +46,6 @@
SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
-#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
@@ -1360,8 +1359,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
}
- } else
- status = connector_status_disconnected;
+ }
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1409,25 +1407,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
- else if (IS_TMDS(intel_sdvo_connector))
+ else if (response & SDVO_TMDS_MASK)
ret = intel_sdvo_hdmi_sink_detect(connector);
- else {
- struct edid *edid;
-
- /* if we have an edid check it matches the connection */
- edid = intel_sdvo_get_edid(connector);
- if (edid == NULL)
- edid = intel_sdvo_get_analog_edid(connector);
- if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
- ret = connector_status_connected;
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- } else
- ret = connector_status_connected;
- }
+ else
+ ret = connector_status_connected;
/* May update encoder flag for like clock for SDVO TV, etc.*/
if (ret == connector_status_connected) {
@@ -1463,15 +1446,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
-
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1690,22 +1668,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
kfree(connector);
}
-static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
-{
- struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- struct edid *edid;
- bool has_audio = false;
-
- if (!intel_sdvo->is_hdmi)
- return false;
-
- edid = intel_sdvo_get_edid(connector);
- if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
- has_audio = drm_detect_monitor_audio(edid);
-
- return has_audio;
-}
-
static int
intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
@@ -1722,23 +1684,17 @@ intel_sdvo_set_property(struct drm_connector *connector,
return ret;
if (property == intel_sdvo_connector->force_audio_property) {
- int i = val;
- bool has_audio;
-
- if (i == intel_sdvo_connector->force_audio)
+ if (val == intel_sdvo_connector->force_audio)
return 0;
- intel_sdvo_connector->force_audio = i;
+ intel_sdvo_connector->force_audio = val;
- if (i == 0)
- has_audio = intel_sdvo_detect_hdmi_audio(connector);
- else
- has_audio = i > 0;
-
- if (has_audio == intel_sdvo->has_hdmi_audio)
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
+ return 0;
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_hdmi_audio = has_audio;
+ intel_sdvo->has_hdmi_audio = val > 0;
goto done;
}
diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c
index fe4a53a50b83..93206e4eaa6f 100644
--- a/trunk/drivers/gpu/drm/i915/intel_tv.c
+++ b/trunk/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,8 +1234,7 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv,
- struct drm_connector *connector)
+intel_tv_detect_type (struct intel_tv *intel_tv)
{
struct drm_encoder *encoder = &intel_tv->base.base;
struct drm_device *dev = encoder->dev;
@@ -1246,13 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1305,13 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0,
- PIPE_HOTPLUG_INTERRUPT_ENABLE |
- PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return type;
}
@@ -1361,7 +1356,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv, connector);
+ type = intel_tv_detect_type(intel_tv);
} else if (force) {
struct drm_crtc *crtc;
int dpms_mode;
@@ -1369,7 +1364,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(intel_tv, connector);
+ type = intel_tv_detect_type(intel_tv);
intel_release_load_detect_pipe(&intel_tv->base, connector,
dpms_mode);
} else
@@ -1663,18 +1658,6 @@ intel_tv_init(struct drm_device *dev)
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
- /* The documentation, for the older chipsets at least, recommend
- * using a polling method rather than hotplug detection for TVs.
- * This is because in order to perform the hotplug detection, the PLLs
- * for the TV must be kept alive increasing power drain and starving
- * bandwidth from other encoders. Notably for instance, it causes
- * pipe underruns on Crestline when this encoder is supposedly idle.
- *
- * More recent chipsets favour HDMI rather than integrated S-Video.
- */
- connector->polled =
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
index 6bdab891c64e..49e5e99917e2 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tvconf.has_component_output = false;
break;
case OUTPUT_LVDS:
- if ((conn & 0x00003f00) >> 8 != 0x10)
+ if ((conn & 0x00003f00) != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
entry->lvdsconf.use_power_scripts = true;
break;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
index a52184007f5f..a7fae26f4654 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,10 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- if (nvbo->vma.node) {
- nouveau_vm_unmap(&nvbo->vma);
- nouveau_vm_put(&nvbo->vma);
- }
+ nouveau_vm_put(&nvbo->vma);
kfree(nvbo);
}
@@ -131,7 +128,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
}
- nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -170,17 +166,17 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
if (dev_priv->card_type == NV_10 &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 2) {
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
nvbo->placement.fpfn = vram_pages / 2;
nvbo->placement.lpfn = ~0;
@@ -789,7 +785,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -815,11 +811,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
index 390d82c3c4b0..a21e00076839 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,7 +507,6 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c
index b368ed74aad7..65699bfaaaea 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,8 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
- &chan->m2mf_ntfy);
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
index 982d70b12722..9821fcacc3d2 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,8 +852,7 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t start, uint32_t end,
- uint32_t *offset);
+ int cout, uint32_t *offset);
extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
struct drm_file *);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
index b0fb9bdcddb7..26347b7cd872 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,10 +725,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0xff, &node);
- if (ret) {
- mem->mm_node = NULL;
- return (ret == -ENOSPC) ? 0 : ret;
- }
+ if (ret)
+ return ret;
node->page_shift = 12;
if (nvbo->vma.node)
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c
index 7609756b6faf..8844b50c3e54 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
return 0;
}
- return -ENOSPC;
+ return -ENOMEM;
}
int
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c b/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5ea167623a82..fe29d604b820 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,8 +96,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t start, uint32_t end,
- uint32_t *b_offset)
+ int size, uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *nobj = NULL;
@@ -105,10 +104,9 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t offset;
int target, ret;
- mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
- start, end, 0);
+ mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
if (mem)
- mem = drm_mm_get_block_range(mem, size, 0, start, end);
+ mem = drm_mm_get_block(mem, size, 0);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
@@ -179,8 +177,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (IS_ERR(chan))
return PTR_ERR(chan);
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
- &na->offset);
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
nouveau_channel_put(&chan);
return ret;
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_pm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_pm.c
index 4399e2f34db4..f05c0cddfeca 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_level *perflvl;
- if (!pm->cur || pm->cur == &pm->boot)
+ if (pm->cur == &pm->boot)
return;
perflvl = pm->cur;
diff --git a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
index c82db37d9f41..ef23550407b5 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
bool duallink, dummy;
- nouveau_bios_parse_lvds_table(dev, output_mode->clock,
- &duallink, &dummy);
+ nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
+ clock, &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -518,6 +518,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
return;
if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
+ struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
/* when removing an output, crtc may not be set, but PANEL_OFF
* must still be run
*/
@@ -525,8 +527,12 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
if (mode == DRM_MODE_DPMS_ON) {
+ if (!nv_connector->native_mode) {
+ NV_ERROR(dev, "Not turning on LVDS without native mode\n");
+ return;
+ }
call_lvds_script(dev, nv_encoder->dcb, head,
- LVDS_PANEL_ON, nv_encoder->mode.clock);
+ LVDS_PANEL_ON, nv_connector->native_mode->clock);
} else
/* pxclk of 0 is fine for PANEL_OFF, and for a
* disconnected LVDS encoder there is no native_mode
diff --git a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
index 18d30c2c1aa6..8870d72388c8 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,32 +211,18 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
- case 0x40:
- case 0x41: /* guess */
- case 0x42:
- case 0x43:
- case 0x45: /* guess */
- case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
- break;
case 0x44:
case 0x4a:
+ case 0x4e:
nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
+
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
- case 0x4c:
- case 0x67:
- default:
nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -244,6 +230,15 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
+
+ default:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
}
}
@@ -401,20 +396,17 @@ nv40_graph_init(struct drm_device *dev)
break;
default:
switch (dev_priv->chipset) {
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x45:
- case 0x4e:
- case 0x44:
- case 0x4a:
- nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- default:
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
break;
+ default:
+ nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
}
nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
index e57caa2a00e3..ea0041810ae3 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,24 +403,16 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
void
nv50_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_vm.c b/trunk/drivers/gpu/drm/nouveau/nv50_vm.c
index 6144156f255a..459ff08241e5 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,11 +169,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
- spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
index a4e5e53e0a62..b1537000a104 100644
--- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
switch (radeon_crtc->rmx_type) {
case RMX_CENTER:
- args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
- args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+ args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+ args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+ args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+ args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
if (a1 > a2) {
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+ args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+ args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
} else if (a2 > a1) {
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+ args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
}
break;
case RMX_FULL:
default:
- args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
- args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
- args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
- args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
+ args.usOverscanRight = radeon_crtc->h_border;
+ args.usOverscanLeft = radeon_crtc->h_border;
+ args.usOverscanBottom = radeon_crtc->v_border;
+ args.usOverscanTop = radeon_crtc->v_border;
break;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE5(rdev)) {
- args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+ args.v3.usSpreadSpectrumAmountFrac = 0;
args.v3.ucSpreadSpectrumType = ss->type;
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
+ args.v3.usSpreadSpectrumAmount = 0;
+ args.v3.usSpreadSpectrumStep = 0;
break;
case ATOM_PPLL_INVALID:
return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+ args.v2.usSpreadSpectrumAmount = ss->amount;
+ args.v2.usSpreadSpectrumStep = ss->step;
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
+ args.v2.usSpreadSpectrumAmount = 0;
+ args.v2.usSpreadSpectrumStep = 0;
break;
case ATOM_PPLL_INVALID:
return;
@@ -538,6 +538,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -554,28 +555,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
dp_clock = dig_connector->dp_clock;
}
}
-
+/* this might work properly with the new pll algo */
+#if 0 /* doesn't work properly on some laptops */
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
- if (ASIC_IS_AVIVO(rdev))
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
}
-
+#endif
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- pll->flags |= RADEON_PLL_IS_LCD;
+ /* rv515 needs more testing with this option */
+ if (rdev->family != CHIP_RV515) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ pll->flags |= RADEON_PLL_IS_LCD;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -662,12 +664,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
- pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
* SetPixelClock provides the dividers
*/
args.v5.ucCRTC = ATOM_CRTC_INVALID;
- args.v5.usPixelClock = cpu_to_le16(dispclk);
+ args.v5.usPixelClock = dispclk;
args.v5.ucPpll = ATOM_DCPLL;
break;
case 6:
/* if the default dcpll clock is specified,
* SetPixelClock provides the dividers
*/
- args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+ args.v6.ulDispEngClkFreq = dispclk;
args.v6.ucPpll = ATOM_DCPLL;
break;
default:
@@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
- if (ASIC_IS_AVIVO(rdev))
+ /* rv515 seems happier with the old algo */
+ if (rdev->family == CHIP_RV515)
+ radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+ else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else
@@ -991,9 +995,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
}
}
-static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
+static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -1133,6 +1137,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
@@ -1290,6 +1300,12 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ else
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = radeon_fb->obj->driver_private;
@@ -1313,7 +1329,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
else
@@ -1328,7 +1344,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
- return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
+ return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
else
diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c
index 6140ea1de45a..ffdc8332b76e 100644
--- a/trunk/drivers/gpu/drm/radeon/evergreen.c
+++ b/trunk/drivers/gpu/drm/radeon/evergreen.c
@@ -1192,11 +1192,7 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(rdev, 1);
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
@@ -1211,11 +1207,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
return -EINVAL;
r700_cp_stop(rdev);
- WREG32(CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- BUF_SWAP_32BIT |
-#endif
- RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1334,11 +1326,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_WPTR, 0);
/* set the wb address wether it's enabled or not */
- WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
- ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2194,6 +2182,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -2638,8 +2627,8 @@ int evergreen_irq_process(struct radeon_device *rdev)
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
- src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
switch (src_id) {
case 1: /* D1 vblank/vline */
@@ -2933,7 +2922,7 @@ static int evergreen_startup(struct radeon_device *rdev)
/* XXX: ontario has problems blitting to gart at the moment */
if (rdev->family == CHIP_PALM) {
rdev->asic->copy = NULL;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
}
/* allocate wb buffer */
diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/trunk/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2be698e78ff2..a1ba4b3053d0 100644
--- a/trunk/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/trunk/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
+ cb_color_info = ((format << 2) | (1 << 24));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -133,9 +133,6 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
/* high addr, stride */
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= (2 << 30);
-#endif
/* xyzw swizzles */
sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
@@ -176,7 +173,7 @@ set_tex_resource(struct radeon_device *rdev,
sq_tex_resource_word0 = (1 << 0); /* 2D */
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
+ sq_tex_resource_word1 = ((h - 1) << 0);
/* xyzw swizzles */
sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
@@ -224,11 +221,7 @@ draw_auto(struct radeon_device *rdev)
radeon_ring_write(rdev, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
+ radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
@@ -548,7 +541,7 @@ static inline uint32_t i2f(uint32_t input)
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
- int i, r, dwords;
+ int r, dwords;
void *ptr;
u32 packet2s[16];
int num_packet2s = 0;
@@ -564,7 +557,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
dwords = rdev->r600_blit.state_len;
while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+ packet2s[num_packet2s++] = PACKET2(0);
dwords++;
}
@@ -605,10 +598,8 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
- for (i = 0; i < evergreen_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
- for (i = 0; i < evergreen_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+ memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
+ memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
@@ -623,7 +614,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -631,7 +622,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
{
int r;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/trunk/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index 3a10399e0066..ef1d28c07fbf 100644
--- a/trunk/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/trunk/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,19 +311,11 @@ const u32 evergreen_vs[] =
0x00000000,
0x3c000000,
0x67961001,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
0x00080000,
-#endif
0x00000000,
0x1c000000,
0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
0x00000008,
-#endif
0x00000000,
};
diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h
index eb4acf4528ff..afec1aca2a73 100644
--- a/trunk/drivers/gpu/drm/radeon/evergreend.h
+++ b/trunk/drivers/gpu/drm/radeon/evergreend.h
@@ -98,7 +98,6 @@
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
-#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
diff --git a/trunk/drivers/gpu/drm/radeon/mkregtable.c b/trunk/drivers/gpu/drm/radeon/mkregtable.c
index 5a82b6b75849..607241c6a8a9 100644
--- a/trunk/drivers/gpu/drm/radeon/mkregtable.c
+++ b/trunk/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,10 +673,8 @@ static int parser_auth(struct table *t, const char *filename)
last_reg = strtol(last_reg_s, NULL, 16);
do {
- if (fgets(buf, 1024, file) == NULL) {
- fclose(file);
+ if (fgets(buf, 1024, file) == NULL)
return -1;
- }
len = strlen(buf);
if (ftell(file) == end)
done = 1;
@@ -687,7 +685,6 @@ static int parser_auth(struct table *t, const char *filename)
fprintf(stderr,
"Error matching regular expression %d in %s\n",
r, filename);
- fclose(file);
return -1;
} else {
buf[match[0].rm_eo] = 0;
diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c
index e372f9e1e5ce..5f15820efe12 100644
--- a/trunk/drivers/gpu/drm/radeon/r100.c
+++ b/trunk/drivers/gpu/drm/radeon/r100.c
@@ -70,6 +70,23 @@ MODULE_FIRMWARE(FIRMWARE_R520);
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+ tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+ /* make sure pending bit is asserted */
+ tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen as late as possible in the vblank interval.
+ * same field for crtc1/2
+ */
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
@@ -1024,7 +1041,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
return r;
}
rdev->cp.ready = true;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -1042,7 +1059,7 @@ void r100_cp_fini(struct radeon_device *rdev)
void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -1410,7 +1427,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
- track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
@@ -1423,7 +1439,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
- track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
@@ -1439,7 +1454,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T0_0:
case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1457,7 +1471,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T1_0:
case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1475,7 +1488,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_OFFSET_T2_0:
case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1493,12 +1505,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
- track->cb_dirty = true;
- track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1519,11 +1528,9 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
- track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
- track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1548,8 +1555,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
- track->cb_dirty = true;
- track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
@@ -1567,7 +1572,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
default:
break;
}
- track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1584,7 +1588,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
- track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
@@ -1599,14 +1602,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
- track->tex_dirty = true;
break;
case RADEON_PP_TEX_PITCH_0:
case RADEON_PP_TEX_PITCH_1:
case RADEON_PP_TEX_PITCH_2:
i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
track->textures[i].pitch = idx_value + 32;
- track->tex_dirty = true;
break;
case RADEON_PP_TXFILTER_0:
case RADEON_PP_TXFILTER_1:
@@ -1620,7 +1621,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
- track->tex_dirty = true;
break;
case RADEON_PP_TXFORMAT_0:
case RADEON_PP_TXFORMAT_1:
@@ -1673,7 +1673,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
- track->tex_dirty = true;
break;
case RADEON_PP_CUBIC_FACES_0:
case RADEON_PP_CUBIC_FACES_1:
@@ -1684,7 +1683,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
- track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2312,6 +2310,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
@@ -3319,9 +3318,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
unsigned long size;
unsigned prim_walk;
unsigned nverts;
- unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+ unsigned num_cb = track->num_cb;
- if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+ if (!track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
@@ -3342,9 +3341,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
return -EINVAL;
}
}
- track->cb_dirty = false;
-
- if (track->zb_dirty && track->z_enabled) {
+ if (track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
@@ -3361,28 +3358,6 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
return -EINVAL;
}
}
- track->zb_dirty = false;
-
- if (track->aa_dirty && track->aaresolve) {
- if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
- return -EINVAL;
- }
- /* I believe the format comes from colorbuffer0. */
- size = track->aa.pitch * track->cb[0].cpp * track->maxy;
- size += track->aa.offset;
- if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
- return -EINVAL;
- }
- }
- track->aa_dirty = false;
-
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
@@ -3442,23 +3417,13 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
prim_walk);
return -EINVAL;
}
-
- if (track->tex_dirty) {
- track->tex_dirty = false;
- return r100_cs_track_texture_check(rdev, track);
- }
- return 0;
+ return r100_cs_track_texture_check(rdev, track);
}
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
- track->cb_dirty = true;
- track->zb_dirty = true;
- track->tex_dirty = true;
- track->aa_dirty = true;
-
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
@@ -3472,8 +3437,6 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
- track->aaresolve = false;
- track->aa.robj = NULL;
}
for (i = 0; i < track->num_cb; i++) {
@@ -3783,6 +3746,8 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/r100_track.h b/trunk/drivers/gpu/drm/radeon/r100_track.h
index 2fef9de7f363..af65600e6564 100644
--- a/trunk/drivers/gpu/drm/radeon/r100_track.h
+++ b/trunk/drivers/gpu/drm/radeon/r100_track.h
@@ -52,7 +52,14 @@ struct r100_cs_track_texture {
unsigned compress_format;
};
+struct r100_cs_track_limits {
+ unsigned num_cb;
+ unsigned num_texture;
+ unsigned max_levels;
+};
+
struct r100_cs_track {
+ struct radeon_device *rdev;
unsigned num_cb;
unsigned num_texture;
unsigned maxy;
@@ -66,17 +73,11 @@ struct r100_cs_track {
struct r100_cs_track_array arrays[11];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
- struct r100_cs_track_cb aa;
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
bool zb_cb_clear;
bool blend_read_enable;
- bool cb_dirty;
- bool zb_dirty;
- bool tex_dirty;
- bool aa_dirty;
- bool aaresolve;
};
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/trunk/drivers/gpu/drm/radeon/r200.c b/trunk/drivers/gpu/drm/radeon/r200.c
index f24058300413..d2408c395619 100644
--- a/trunk/drivers/gpu/drm/radeon/r200.c
+++ b/trunk/drivers/gpu/drm/radeon/r200.c
@@ -184,7 +184,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
- track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
@@ -197,7 +196,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
- track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
@@ -216,7 +214,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
case R200_PP_CUBIC_OFFSET_F1_0:
case R200_PP_CUBIC_OFFSET_F2_0:
@@ -260,12 +257,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
- track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
- track->cb_dirty = true;
- track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -286,11 +280,9 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
- track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
- track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -320,8 +312,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
- track->cb_dirty = true;
- track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
@@ -339,7 +329,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
default:
break;
}
- track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -356,7 +345,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
- track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
@@ -381,7 +369,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
- track->tex_dirty = true;
break;
case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1:
@@ -391,7 +378,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = idx_value + 32;
- track->tex_dirty = true;
break;
case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1:
@@ -408,7 +394,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
- track->tex_dirty = true;
break;
case R200_PP_TXMULTI_CTL_0:
case R200_PP_TXMULTI_CTL_1:
@@ -447,7 +432,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].tex_coord_type = 1;
break;
}
- track->tex_dirty = true;
break;
case R200_PP_TXFORMAT_0:
case R200_PP_TXFORMAT_1:
@@ -504,7 +488,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
- track->tex_dirty = true;
break;
case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1:
@@ -518,7 +501,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
- track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c
index 069efa8c8ecf..55fe5ba7def3 100644
--- a/trunk/drivers/gpu/drm/radeon/r300.c
+++ b/trunk/drivers/gpu/drm/radeon/r300.c
@@ -667,7 +667,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
- track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
@@ -680,7 +679,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
- track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
@@ -719,7 +717,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tmp |= tile_flags;
ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
- track->tex_dirty = true;
break;
/* Tracked registers */
case 0x2084:
@@ -746,8 +743,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (p->rdev->family < CHIP_RV515) {
track->maxy -= 1440;
}
- track->cb_dirty = true;
- track->zb_dirty = true;
break;
case 0x4E00:
/* RB3D_CCTL */
@@ -757,7 +752,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
- track->cb_dirty = true;
break;
case 0x4E38:
case 0x4E3C:
@@ -820,7 +814,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
((idx_value >> 21) & 0xF));
return -EINVAL;
}
- track->cb_dirty = true;
break;
case 0x4F00:
/* ZB_CNTL */
@@ -829,7 +822,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
} else {
track->z_enabled = false;
}
- track->zb_dirty = true;
break;
case 0x4F10:
/* ZB_FORMAT */
@@ -846,7 +838,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
(idx_value & 0xF));
return -EINVAL;
}
- track->zb_dirty = true;
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
@@ -870,17 +861,14 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = tmp;
track->zb.pitch = idx_value & 0x3FFC;
- track->zb_dirty = true;
break;
case 0x4104:
- /* TX_ENABLE */
for (i = 0; i < 16; i++) {
bool enabled;
enabled = !!(idx_value & (1 << i));
track->textures[i].enabled = enabled;
}
- track->tex_dirty = true;
break;
case 0x44C0:
case 0x44C4:
@@ -910,7 +898,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
- case R300_TX_FORMAT_FL_I16:
case R300_TX_FORMAT_Y8X8:
case R300_TX_FORMAT_Z5Y6X5:
case R300_TX_FORMAT_Z6Y5X5:
@@ -923,7 +910,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
- case R300_TX_FORMAT_FL_I16A16:
case R300_TX_FORMAT_Z11Y11X10:
case R300_TX_FORMAT_Z10Y11X11:
case R300_TX_FORMAT_W8Z8Y8X8:
@@ -965,8 +951,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
return -EINVAL;
+ break;
}
- track->tex_dirty = true;
break;
case 0x4400:
case 0x4404:
@@ -994,7 +980,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_h = false;
}
- track->tex_dirty = true;
break;
case 0x4500:
case 0x4504:
@@ -1032,7 +1017,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
- track->tex_dirty = true;
break;
case 0x4480:
case 0x4484:
@@ -1062,7 +1046,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = !!tmp;
tmp = (idx_value >> 22) & 0xF;
track->textures[i].txdepth = tmp;
- track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1077,7 +1060,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
- track->cb_dirty = true;
break;
case 0x43a4:
/* SC_HYPERZ_EN */
@@ -1091,8 +1073,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4f1c:
/* ZB_BW_CNTL */
track->zb_cb_clear = !!(idx_value & (1 << 5));
- track->cb_dirty = true;
- track->zb_dirty = true;
if (p->rdev->hyperz_filp != p->filp) {
if (idx_value & (R300_HIZ_ENABLE |
R300_RD_COMP_ENABLE |
@@ -1104,28 +1084,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
- track->cb_dirty = true;
- break;
- case R300_RB3D_AARESOLVE_OFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- track->aa.robj = reloc->robj;
- track->aa.offset = idx_value;
- track->aa_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
- break;
- case R300_RB3D_AARESOLVE_PITCH:
- track->aa.pitch = idx_value & 0x3FFE;
- track->aa_dirty = true;
break;
- case R300_RB3D_AARESOLVE_CTL:
- track->aaresolve = idx_value & 0x1;
- track->aa_dirty = true;
+ case 0x4f28: /* ZB_DEPTHCLEARVALUE */
break;
case 0x4f30: /* ZB_MASK_OFFSET */
case 0x4f34: /* ZB_ZMASK_PITCH */
diff --git a/trunk/drivers/gpu/drm/radeon/r300_reg.h b/trunk/drivers/gpu/drm/radeon/r300_reg.h
index f0bce399c9f3..1a0d5362cd79 100644
--- a/trunk/drivers/gpu/drm/radeon/r300_reg.h
+++ b/trunk/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,8 +1371,6 @@
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
-#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
-#define R300_RB3D_AARESOLVE_PITCH 0x4E84
#define R300_RB3D_AARESOLVE_CTL 0x4E88
/* gap */
diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c
index 9b3fad23b76c..650672a0f5ad 100644
--- a/trunk/drivers/gpu/drm/radeon/r600.c
+++ b/trunk/drivers/gpu/drm/radeon/r600.c
@@ -1255,6 +1255,7 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP) {
@@ -1936,7 +1937,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
}
@@ -2104,11 +2105,7 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
r600_cp_stop(rdev);
- WREG32(CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- BUF_SWAP_32BIT |
-#endif
- RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2195,11 +2192,7 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_WPTR, 0);
/* set the wb address whether it's enabled or not */
- WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
- ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+ WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2635,11 +2628,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
@@ -3308,8 +3297,8 @@ int r600_irq_process(struct radeon_device *rdev)
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
- src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
switch (src_id) {
case 1: /* D1 vblank/vline */
diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit.c b/trunk/drivers/gpu/drm/radeon/r600_blit.c
index 7f1043448d25..ca5c29f70779 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_blit.c
+++ b/trunk/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
for (i = 0; i < r6xx_vs_size; i++)
- vs[i] = cpu_to_le32(r6xx_vs[i]);
+ vs[i] = r6xx_vs[i];
for (i = 0; i < r6xx_ps_size; i++)
- ps[i] = cpu_to_le32(r6xx_ps[i]);
+ ps[i] = r6xx_ps[i];
dev_priv->blit_vb->used = 512;
@@ -192,9 +192,6 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
DRM_DEBUG("\n");
sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= (2 << 30);
-#endif
BEGIN_RING(9);
OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -294,11 +291,7 @@ draw_auto(drm_radeon_private_t *dev_priv)
OUT_RING(DI_PT_RECTLIST);
OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
-#ifdef __BIG_ENDIAN
- OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
-#else
OUT_RING(DI_INDEX_SIZE_16_BIT);
-#endif
OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
OUT_RING(1);
diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c
index df68d91e8190..86e5aa07f0db 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
if (h < 8)
h = 8;
- cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
+ cb_color_info = ((format << 2) | (1 << 27));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
@@ -165,9 +165,6 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= (2 << 30);
-#endif
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0x460);
@@ -202,7 +199,7 @@ set_tex_resource(struct radeon_device *rdev,
if (h < 1)
h = 1;
- sq_tex_resource_word0 = (1 << 0) | (1 << 3);
+ sq_tex_resource_word0 = (1 << 0);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
((w - 1) << 19));
@@ -256,11 +253,7 @@ draw_auto(struct radeon_device *rdev)
radeon_ring_write(rdev, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
+ radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
@@ -431,11 +424,7 @@ set_default_state(struct radeon_device *rdev)
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
@@ -478,7 +467,7 @@ static inline uint32_t i2f(uint32_t input)
int r600_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
- int i, r, dwords;
+ int r, dwords;
void *ptr;
u32 packet2s[16];
int num_packet2s = 0;
@@ -497,7 +486,7 @@ int r600_blit_init(struct radeon_device *rdev)
dwords = rdev->r600_blit.state_len;
while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+ packet2s[num_packet2s++] = PACKET2(0);
dwords++;
}
@@ -540,10 +529,8 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
- for (i = 0; i < r6xx_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
- for (i = 0; i < r6xx_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
+ memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
+ memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
@@ -558,7 +545,7 @@ int r600_blit_init(struct radeon_device *rdev)
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.real_vram_size;
return 0;
}
@@ -566,7 +553,7 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.c b/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 2d1f6c5ee2a7..e8151c1d55b2 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,11 +684,7 @@ const u32 r6xx_vs[] =
0x00000000,
0x3c000000,
0x68cd1000,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
0x00080000,
-#endif
0x00000000,
};
diff --git a/trunk/drivers/gpu/drm/radeon/r600_cp.c b/trunk/drivers/gpu/drm/radeon/r600_cp.c
index c3ab959bdc7c..4f4cd8b286d5 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_cp.c
+++ b/trunk/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,9 +396,6 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
r600_do_cp_stop(dev_priv);
RADEON_WRITE(R600_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- R600_BUF_SWAP_32BIT |
-#endif
R600_RB_NO_UPDATE |
R600_RB_BLKSZ(15) |
R600_RB_BUFSZ(3));
@@ -489,12 +486,9 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
r600_do_cp_stop(dev_priv);
RADEON_WRITE(R600_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- R600_BUF_SWAP_32BIT |
-#endif
R600_RB_NO_UPDATE |
- R600_RB_BLKSZ(15) |
- R600_RB_BUFSZ(3));
+ (15 << 8) |
+ (3 << 0));
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -556,12 +550,8 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
if (!dev_priv->writeback_works) {
/* Disable writeback to avoid unnecessary bus master transfer */
- RADEON_WRITE(R600_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- R600_BUF_SWAP_32BIT |
-#endif
- RADEON_READ(R600_CP_RB_CNTL) |
- R600_RB_NO_UPDATE);
+ RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
+ RADEON_RB_NO_UPDATE);
RADEON_WRITE(R600_SCRATCH_UMSK, 0);
}
}
@@ -585,11 +575,7 @@ int r600_do_engine_reset(struct drm_device *dev)
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
- RADEON_WRITE(R600_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- R600_BUF_SWAP_32BIT |
-#endif
- R600_RB_RPTR_WR_ENA);
+ RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1852,10 +1838,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
+ dev_priv->gart_vm_start;
}
RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (rptr_addr & 0xfffffffc));
+ rptr_addr & 0xffffffff);
RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
upper_32_bits(rptr_addr));
@@ -1906,7 +1889,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
{
u64 scratch_addr;
- scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
+ scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
scratch_addr += R600_SCRATCH_REG_OFFSET;
scratch_addr >>= 8;
diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c
index 153095fba62f..7831e0890210 100644
--- a/trunk/drivers/gpu/drm/radeon/r600_cs.c
+++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,18 +295,17 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
return -EINVAL;
}
@@ -321,10 +320,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
- array_mode,
- track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]));
+ dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
}
}
@@ -459,18 +455,17 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
return -EINVAL;
}
@@ -478,10 +473,9 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
}
@@ -1233,18 +1227,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
/* XXX check height as well... */
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
+ __func__, __LINE__, base_offset);
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
+ __func__, __LINE__, mip_offset);
return -EINVAL;
}
diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h
index 04bac0bbd3ec..a5d898b4bad2 100644
--- a/trunk/drivers/gpu/drm/radeon/r600d.h
+++ b/trunk/drivers/gpu/drm/radeon/r600d.h
@@ -154,14 +154,13 @@
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_BASE 0xC100
#define CP_RB_CNTL 0xC104
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
+#define RB_BUFSZ(x) ((x)<<0)
+#define RB_BLKSZ(x) ((x)<<8)
+#define RB_NO_UPDATE (1<<27)
+#define RB_RPTR_WR_ENA (1<<31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
-#define RB_RPTR_SWAP(x) ((x) << 0)
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h
index 6b3429495118..56c48b67ef3d 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon.h
@@ -345,6 +345,7 @@ struct radeon_mc {
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+ u64 active_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
@@ -1447,7 +1448,6 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
extern bool r600_card_posted(struct radeon_device *rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c
index 793c5e6026ad..e75d63b8e21d 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,9 +834,6 @@ static struct radeon_asic sumo_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
- .pre_page_flip = &evergreen_pre_page_flip,
- .page_flip = &evergreen_page_flip,
- .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
index 02d5c415f499..5c1cc7ad9a15 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->usClkMaskRegisterIndex == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
- gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
data_offset);
switch (crev) {
case 1:
- if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
+ if (igp_info->info.ulBootUpMemoryClock)
return true;
break;
case 2:
- if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
+ if (igp_info->info_2.ulBootUpSidePortClock)
return true;
break;
default:
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
for (i = 0; i < num_indices; i++) {
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+ (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+ (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+ (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
- lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
- lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+ lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
+ lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->linkb = false;
/* parse the lcd record table */
- if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+ if (lvds_info->info.usModePatchTableOffset) {
ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
bool bad_record = false;
u8 *record = (u8 *)(mode_info->atom_context->bios +
data_offset +
- le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+ lvds_info->info.usModePatchTableOffset);
while (*record != ATOM_RECORD_END_TYPE) {
switch (*record) {
case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
- vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ vddc = firmware_info->info_14.usBootUpVDDCVoltage;
}
return vddc;
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- le16_to_cpu(clock_info->evergreen.usVDDC);
+ clock_info->evergreen.usVDDC;
} else {
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- le16_to_cpu(clock_info->r600.usVDDC);
+ clock_info->r600.usVDDC;
}
if (rdev->flags & RADEON_IS_IGP) {
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
state_array = (struct StateArray *)
(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ power_info->pplib.usStateArrayOffset);
clock_info_array = (struct ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ power_info->pplib.usClockInfoArrayOffset);
non_clock_info_array = (struct NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ power_info->pplib.usNonClockInfoArrayOffset);
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnEngineClock);
+ return args.ulReturnEngineClock;
}
uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- return le32_to_cpu(args.ulReturnMemoryClock);
+ return args.ulReturnMemoryClock;
}
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
- args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
+ args.ulTargetEngineClock = eng_clock; /* 10 khz */
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
if (rdev->flags & RADEON_IS_IGP)
return;
- args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
+ args.ulTargetMemoryClock = mem_clock; /* 10 khz */
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c
index cf7c8d5b4ec2..d27ef74590cd 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,11 +1504,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4a48)) {
/* Mac X800 */
rdev->mode_info.connector_table = CT_MAC_X800;
- } else if ((rdev->pdev->device == 0x4150) &&
- (rdev->pdev->subsystem_vendor == 0x1002) &&
- (rdev->pdev->subsystem_device == 0x4150)) {
- /* Mac G5 9600 */
- rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -2027,48 +2022,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
&hpd);
break;
- case CT_MAC_G5_9600:
- DRM_INFO("Connector Table: %d (mac g5 9600)\n",
- rdev->mode_info.connector_table);
- /* DVI - tv dac, dvo */
- ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
- hpd.hpd = RADEON_HPD_1; /* ??? */
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
- ATOM_DEVICE_DFP2_SUPPORT,
- 0),
- ATOM_DEVICE_DFP2_SUPPORT);
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
- ATOM_DEVICE_CRT2_SUPPORT,
- 2),
- ATOM_DEVICE_CRT2_SUPPORT);
- radeon_add_legacy_connector(dev, 0,
- ATOM_DEVICE_DFP2_SUPPORT |
- ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
- &hpd);
- /* ADC - primary dac, internal tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
- hpd.hpd = RADEON_HPD_2; /* ??? */
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
- ATOM_DEVICE_DFP1_SUPPORT,
- 0),
- ATOM_DEVICE_DFP1_SUPPORT);
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_enum(dev,
- ATOM_DEVICE_CRT1_SUPPORT,
- 1),
- ATOM_DEVICE_CRT1_SUPPORT);
- radeon_add_legacy_connector(dev, 1,
- ATOM_DEVICE_DFP1_SUPPORT |
- ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
- &hpd);
- break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c
index 4954e2d6ffa2..0d478932b1a9 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_device.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c
@@ -936,11 +936,8 @@ int radeon_resume_kms(struct drm_device *dev)
int radeon_gpu_reset(struct radeon_device *rdev)
{
int r;
- int resched;
radeon_save_bios_scratch_regs(rdev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
r = radeon_asic_reset(rdev);
@@ -949,7 +946,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
return 0;
}
/* bad news, how to tell it to userspace ? */
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_display.c b/trunk/drivers/gpu/drm/radeon/radeon_display.c
index 3e7e7f9eb781..2eff98cfd728 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_display.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_display.c
@@ -793,11 +793,6 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
tmp *= target_clock;
*fb_div = tmp / pll->reference_freq;
*frac_fb_div = tmp % pll->reference_freq;
-
- if (*fb_div > pll->max_feedback_div)
- *fb_div = pll->max_feedback_div;
- else if (*fb_div < pll->min_feedback_div)
- *fb_div = pll->min_feedback_div;
}
static u32 avivo_get_post_div(struct radeon_pll *pll,
@@ -831,11 +826,6 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
post_div--;
}
- if (post_div > pll->max_post_div)
- post_div = pll->max_post_div;
- else if (post_div < pll->min_post_div)
- post_div = pll->min_post_div;
-
return post_div;
}
@@ -971,7 +961,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+ for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.h b/trunk/drivers/gpu/drm/radeon/radeon_drv.h
index 5cba46b9779a..448eba89d1e6 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,7 +1524,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
#define R600_CP_RB_CNTL 0xc104
# define R600_RB_BUFSZ(x) ((x) << 0)
# define R600_RB_BLKSZ(x) ((x) << 8)
-# define R600_BUF_SWAP_32BIT (2 << 16)
# define R600_RB_NO_UPDATE (1 << 27)
# define R600_RB_RPTR_WR_ENA (1 << 31)
#define R600_CP_RB_RPTR_WR 0xc108
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
index b4274883227f..d4a542247618 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
- args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+ args.v1.usInitInfo = connector_object_id;
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
- args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+ args.v3.sExtEncoder.usConnectorId = connector_object_id;
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,21 +1570,11 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
- if (ASIC_IS_AVIVO(rdev) &&
- (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
- if (ASIC_IS_DCE4(rdev)) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
- EVERGREEN_INTERLEAVE_EN);
- else
- WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- } else {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
- AVIVO_D1MODE_INTERLEAVE_EN);
- else
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- }
+ /* XXX check DCE4 */
+ if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
+ if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
}
}
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fb.c b/trunk/drivers/gpu/drm/radeon/radeon_fb.c
index cc44bdfec80f..66324b5bb5ba 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,14 +113,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
- int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- if (rdev->family >= CHIP_R600)
- height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitch * height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c
index 1fe95dfe48c9..df95eb83dac6 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,12 +156,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
- struct ttm_mem_type_manager *man;
-
- man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = (u64)man->size << PAGE_SHIFT;
+ args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 78968b738e88..cf0638c3b7c7 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
(target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
- crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,7 +502,6 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
gen_cntl_val = RREG32(gen_cntl_reg);
gen_cntl_val &= ~(0xf << 8);
gen_cntl_val |= (format << 8);
- gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
WREG32(gen_cntl_reg, gen_cntl_val);
crtc_offset = (u32)base;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
index a670caaee29e..6794cdf91f28 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,6 @@ enum radeon_connector_table {
CT_EMAC,
CT_RN50_POWER,
CT_MAC_X800,
- CT_MAC_G5_9600,
};
enum radeon_dvo_chip {
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
index 8389b4c63d12..1272e4b6a1d4 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,20 +589,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
DRM_INFO("radeon: ttm finalized\n");
}
-/* this should only be called at bootup or when userspace
- * isn't running */
-void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
-{
- struct ttm_mem_type_manager *man;
-
- if (!rdev->mman.initialized)
- return;
-
- man = &rdev->mman.bdev.man[TTM_PL_VRAM];
- /* this just adjusts TTM size idea, which sets lpfn to the correct value */
- man->size = size >> PAGE_SHIFT;
-}
-
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
@@ -801,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].show = &radeon_mm_dump_table;
radeon_mem_types_list[i].driver_features = 0;
if (i == 0)
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
else
- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+ radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
}
/* Add ttm page pool to debugfs */
diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r300 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r300
index e8a1786b6426..b506ec1cab4b 100644
--- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,7 +683,9 @@ r300 0x4f60
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -704,11 +706,13 @@ r300 0x4f60
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r420 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r420
index 722074e21e2f..8c1214c2390f 100644
--- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,6 +130,7 @@ r420 0x4f60
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
@@ -749,7 +750,9 @@ r420 0x4f60
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -770,11 +773,13 @@ r420 0x4f60
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/rs600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/rs600
index d9f62866bbc1..0828d80396f2 100644
--- a/trunk/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,7 +749,9 @@ rs600 0x6d40
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -770,11 +772,13 @@ rs600 0x6d40
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/rv515 b/trunk/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..ef422bbacfc1 100644
--- a/trunk/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,6 +164,7 @@ rv515 0x6d40
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4114 SU_TEX_WRAP_PS3
0x4118 PS3_ENABLE
@@ -460,7 +461,9 @@ rv515 0x6d40
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
@@ -481,6 +484,9 @@ rv515 0x6d40
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -490,5 +496,4 @@ rv515 0x6d40
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F58 ZB_ZPASS_DATA
-0x4F28 ZB_DEPTHCLEARVALUE
0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c
index 8af4679db23e..5afe294ed51f 100644
--- a/trunk/drivers/gpu/drm/radeon/rs600.c
+++ b/trunk/drivers/gpu/drm/radeon/rs600.c
@@ -751,6 +751,7 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c
index 66c949b7c18c..0137d3e3728d 100644
--- a/trunk/drivers/gpu/drm/radeon/rs690.c
+++ b/trunk/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
switch (crev) {
case 1:
tmp.full = dfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- if (le16_to_cpu(info->info.usK8MemoryClock))
+ if (info->info.usK8MemoryClock)
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
else if (rdev->clock.default_mclk) {
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
break;
case 2:
tmp.full = dfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
- rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
+ if (info->info_v2.ulBootUpUMAClock)
+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
else if (rdev->clock.default_mclk)
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
else
rdev->pm.igp_system_mclk.full = dfixed_const(66700);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
@@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c
index 714ad45757d0..2211a323db41 100644
--- a/trunk/drivers/gpu/drm/radeon/rv770.c
+++ b/trunk/drivers/gpu/drm/radeon/rv770.c
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
}
@@ -321,11 +321,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return -EINVAL;
r700_cp_stop(rdev);
- WREG32(CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- BUF_SWAP_32BIT |
-#endif
- RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -1123,6 +1119,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
diff --git a/trunk/drivers/gpu/drm/radeon/rv770d.h b/trunk/drivers/gpu/drm/radeon/rv770d.h
index 79fa588e9ed5..abc8cf5a3672 100644
--- a/trunk/drivers/gpu/drm/radeon/rv770d.h
+++ b/trunk/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_CNTL 0xC104
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
+#define RB_BUFSZ(x) ((x)<<0)
+#define RB_BLKSZ(x) ((x)<<8)
+#define RB_NO_UPDATE (1<<27)
+#define RB_RPTR_WR_ENA (1<<31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
diff --git a/trunk/drivers/hwmon/Kconfig b/trunk/drivers/hwmon/Kconfig
index 297bc9a7d6e6..773e484f1646 100644
--- a/trunk/drivers/hwmon/Kconfig
+++ b/trunk/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
will be called k8temp.
config SENSORS_K10TEMP
- tristate "AMD Family 10h/11h/12h/14h temperature sensor"
+ tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
depends on X86 && PCI
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
- the AMD Family 10h and all revisions of the AMD Family 11h,
- 12h (Llano), and 14h (Brazos) microarchitectures.
+ the AMD Family 10h and all revisions of the AMD Family 11h
+ microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -455,14 +455,13 @@ config SENSORS_JZ4740
called jz4740-hwmon.
config SENSORS_JC42
- tristate "JEDEC JC42.4 compliant memory module temperature sensors"
+ tristate "JEDEC JC42.4 compliant temperature sensors"
depends on I2C
help
- If you say yes here, you get support for JEDEC JC42.4 compliant
- temperature sensors, which are used on many DDR3 memory modules for
- mobile devices and servers. Support will include, but not be limited
- to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
+ If you say yes here you get support for JEDEC JC42.4 compliant
+ temperature sensors. Support will include, but not be limited to,
+ ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+ MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -575,7 +574,7 @@ config SENSORS_LM85
help
If you say yes here you get support for National Semiconductor LM85
sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
- EMC6D101, EMC6D102, and EMC6D103.
+ EMC6D101 and EMC6D102.
This driver can also be built as a module. If so, the module
will be called lm85.
diff --git a/trunk/drivers/hwmon/ad7414.c b/trunk/drivers/hwmon/ad7414.c
index d46c0c758ddf..86d822aa9bbf 100644
--- a/trunk/drivers/hwmon/ad7414.c
+++ b/trunk/drivers/hwmon/ad7414.c
@@ -242,7 +242,6 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
-MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
diff --git a/trunk/drivers/hwmon/adt7411.c b/trunk/drivers/hwmon/adt7411.c
index 5cc3e3784b42..f13c843a2964 100644
--- a/trunk/drivers/hwmon/adt7411.c
+++ b/trunk/drivers/hwmon/adt7411.c
@@ -334,7 +334,6 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/trunk/drivers/hwmon/emc1403.c b/trunk/drivers/hwmon/emc1403.c
index cd2a6e437aec..5dea9faa1656 100644
--- a/trunk/drivers/hwmon/emc1403.c
+++ b/trunk/drivers/hwmon/emc1403.c
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client)
}
static const unsigned short emc1403_address_list[] = {
- 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
+ 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
};
static const struct i2c_device_id emc1403_idtable[] = {
diff --git a/trunk/drivers/hwmon/f71882fg.c b/trunk/drivers/hwmon/f71882fg.c
index 6e06019015a5..3f49dd376f02 100644
--- a/trunk/drivers/hwmon/f71882fg.c
+++ b/trunk/drivers/hwmon/f71882fg.c
@@ -37,7 +37,7 @@
#define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */
#define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -2111,6 +2111,7 @@ static int f71882fg_remove(struct platform_device *pdev)
int nr_fans = (data->type == f71882fg) ? 4 : 3;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
+ platform_set_drvdata(pdev, NULL);
if (data->hwmon_dev)
hwmon_device_unregister(data->hwmon_dev);
@@ -2177,7 +2178,6 @@ static int f71882fg_remove(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
diff --git a/trunk/drivers/hwmon/jc42.c b/trunk/drivers/hwmon/jc42.c
index 934991237061..340fc78c8dde 100644
--- a/trunk/drivers/hwmon/jc42.c
+++ b/trunk/drivers/hwmon/jc42.c
@@ -53,8 +53,6 @@ static const unsigned short normal_i2c[] = {
/* Configuration register defines */
#define JC42_CFG_CRIT_ONLY (1 << 2)
-#define JC42_CFG_TCRIT_LOCK (1 << 6)
-#define JC42_CFG_EVENT_LOCK (1 << 7)
#define JC42_CFG_SHUTDOWN (1 << 8)
#define JC42_CFG_HYST_SHIFT 9
#define JC42_CFG_HYST_MASK 0x03
@@ -334,7 +332,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct jc42_data *data = i2c_get_clientdata(client);
- unsigned long val;
+ long val;
int diff, hyst;
int err;
int ret = count;
@@ -382,14 +380,14 @@ static ssize_t show_alarm(struct device *dev,
static DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_temp_max, set_temp_max);
-static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
show_temp_crit_hyst, set_temp_crit_hyst);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
show_temp_max_hyst, NULL);
@@ -414,31 +412,8 @@ static struct attribute *jc42_attributes[] = {
NULL
};
-static mode_t jc42_attribute_mode(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct i2c_client *client = to_i2c_client(dev);
- struct jc42_data *data = i2c_get_clientdata(client);
- unsigned int config = data->config;
- bool readonly;
-
- if (attr == &dev_attr_temp1_crit.attr)
- readonly = config & JC42_CFG_TCRIT_LOCK;
- else if (attr == &dev_attr_temp1_min.attr ||
- attr == &dev_attr_temp1_max.attr)
- readonly = config & JC42_CFG_EVENT_LOCK;
- else if (attr == &dev_attr_temp1_crit_hyst.attr)
- readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
- else
- readonly = true;
-
- return S_IRUGO | (readonly ? 0 : S_IWUSR);
-}
-
static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
- .is_visible = jc42_attribute_mode,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/trunk/drivers/hwmon/k10temp.c b/trunk/drivers/hwmon/k10temp.c
index 82bf65aa2968..da5a2404cd3e 100644
--- a/trunk/drivers/hwmon/k10temp.c
+++ b/trunk/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch
*
@@ -25,7 +25,7 @@
#include
#include
-MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch ");
MODULE_LICENSE("GPL");
@@ -208,7 +208,6 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/trunk/drivers/hwmon/lm63.c b/trunk/drivers/hwmon/lm63.c
index 508cb291f71b..776aeb3019d2 100644
--- a/trunk/drivers/hwmon/lm63.c
+++ b/trunk/drivers/hwmon/lm63.c
@@ -98,9 +98,6 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
* value, it uses signed 8-bit values with LSB = 1 degree Celsius.
* For remote temperature, low and high limits, it uses signed 11-bit values
* with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
- * For LM64 the actual remote diode temperature is 16 degree Celsius higher
- * than the register reading. Remote temperature setpoints have to be
- * adapted accordingly.
*/
#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -168,8 +165,6 @@ struct lm63_data {
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
- int kind;
- int temp2_offset;
/* registers values */
u8 config, config_fan;
@@ -252,34 +247,16 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
}
-/*
- * There are 8bit registers for both local(temp1) and remote(temp2) sensor.
- * For remote sensor registers temp2_offset has to be considered,
- * for local sensor it must not.
- * So we need separate 8bit accessors for local and remote sensor.
- */
-static ssize_t show_local_temp8(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
+static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
}
-static ssize_t show_remote_temp8(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
- + data->temp2_offset);
-}
-
-static ssize_t set_local_temp8(struct device *dev,
- struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
@@ -297,8 +274,7 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
- + data->temp2_offset);
+ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
}
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -318,7 +294,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
int nr = attr->index;
mutex_lock(&data->update_lock);
- data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+ data->temp11[nr] = TEMP11_TO_REG(val);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
data->temp11[nr] >> 8);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -334,7 +310,6 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
- + data->temp2_offset
- TEMP8_FROM_REG(data->temp2_crit_hyst));
}
@@ -349,7 +324,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
long hyst;
mutex_lock(&data->update_lock);
- hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
+ hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
HYST_TO_REG(hyst));
mutex_unlock(&data->update_lock);
@@ -380,21 +355,16 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
- set_local_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
+ set_temp8, 1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 2);
-/*
- * On LM63, temp2_crit can be set only once, which should be job
- * of the bootloader.
- */
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
- NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
set_temp2_crit_hyst);
@@ -509,12 +479,7 @@ static int lm63_probe(struct i2c_client *new_client,
data->valid = 0;
mutex_init(&data->update_lock);
- /* Set the device type */
- data->kind = id->driver_data;
- if (data->kind == lm64)
- data->temp2_offset = 16000;
-
- /* Initialize chip */
+ /* Initialize the LM63 chip */
lm63_init_client(new_client);
/* Register sysfs hooks */
diff --git a/trunk/drivers/hwmon/lm85.c b/trunk/drivers/hwmon/lm85.c
index d2cc28660816..1e229847f37a 100644
--- a/trunk/drivers/hwmon/lm85.c
+++ b/trunk/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102, emc6d103
+ emc6d100, emc6d102
};
/* The LM85 registers */
@@ -90,9 +90,6 @@ enum chips {
#define LM85_VERSTEP_EMC6D100_A0 0x60
#define LM85_VERSTEP_EMC6D100_A1 0x61
#define LM85_VERSTEP_EMC6D102 0x65
-#define LM85_VERSTEP_EMC6D103_A0 0x68
-#define LM85_VERSTEP_EMC6D103_A1 0x69
-#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
#define LM85_REG_CONFIG 0x40
@@ -351,7 +348,6 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
- { "emc6d103", emc6d103 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1254,20 +1250,6 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D102:
type_name = "emc6d102";
break;
- case LM85_VERSTEP_EMC6D103_A0:
- case LM85_VERSTEP_EMC6D103_A1:
- type_name = "emc6d103";
- break;
- /*
- * Registers apparently missing in EMC6D103S/EMC6D103:A2
- * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
- * (according to the data sheets), but used unconditionally
- * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
- * So skip EMC6D103S for now.
- case LM85_VERSTEP_EMC6D103S:
- type_name = "emc6d103s";
- break;
- */
}
} else {
dev_dbg(&adapter->dev,
@@ -1301,7 +1283,6 @@ static int lm85_probe(struct i2c_client *client,
case adt7468:
case emc6d100:
case emc6d102:
- case emc6d103:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1487,7 +1468,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102 || data->type == emc6d103) {
+ } else if (data->type == emc6d102) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
diff --git a/trunk/drivers/i2c/busses/i2c-eg20t.c b/trunk/drivers/i2c/busses/i2c-eg20t.c
index 50ea1f43bdc1..2e067dd2ee51 100644
--- a/trunk/drivers/i2c/busses/i2c-eg20t.c
+++ b/trunk/drivers/i2c/busses/i2c-eg20t.c
@@ -29,7 +29,6 @@
#include
#include
#include
-#include
#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */
#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
diff --git a/trunk/drivers/i2c/busses/i2c-ocores.c b/trunk/drivers/i2c/busses/i2c-ocores.c
index 61653f079671..ef3bcb1ce864 100644
--- a/trunk/drivers/i2c/busses/i2c-ocores.c
+++ b/trunk/drivers/i2c/busses/i2c-ocores.c
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
static int ocores_i2c_of_probe(struct platform_device* pdev,
struct ocores_i2c* i2c)
{
- const __be32* val;
+ __be32* val;
val = of_get_property(pdev->dev.of_node, "regstep", NULL);
if (!val) {
diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c
index 58a58c7eaa17..b605ff3a1fa0 100644
--- a/trunk/drivers/i2c/busses/i2c-omap.c
+++ b/trunk/drivers/i2c/busses/i2c-omap.c
@@ -378,7 +378,9 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ if (dev->rev < OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -845,15 +847,11 @@ omap_i2c_isr(int this_irq, void *dev_id)
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
}
- /*
- * ProDB0017052: Clear ARDY bit twice
- */
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat &
(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
- OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
- OMAP_I2C_STAT_ARDY));
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -1139,41 +1137,12 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_SUSPEND
-static int omap_i2c_suspend(struct device *dev)
-{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- dev->bus->pm->runtime_suspend(dev);
-
- return 0;
-}
-
-static int omap_i2c_resume(struct device *dev)
-{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- dev->bus->pm->runtime_resume(dev);
-
- return 0;
-}
-
-static struct dev_pm_ops omap_i2c_pm_ops = {
- .suspend = omap_i2c_suspend,
- .resume = omap_i2c_resume,
-};
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif
-
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
- .pm = OMAP_I2C_PM_OPS,
},
};
diff --git a/trunk/drivers/i2c/busses/i2c-stu300.c b/trunk/drivers/i2c/busses/i2c-stu300.c
index 266135ddf7fa..495be451d326 100644
--- a/trunk/drivers/i2c/busses/i2c-stu300.c
+++ b/trunk/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
adap->class = I2C_CLASS_DDC;
- strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
adap->algo = &stu300_algo;
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 4a5c4a44ffb1..1fa091e05690 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -62,7 +62,6 @@
#include
#include
#include
-#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
@@ -85,12 +84,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
-/*
- * Hardware C-state auto-demotion may not always be optimal.
- * Indicate which enable bits to clear here.
- */
-static unsigned long long auto_demotion_disable_flags;
-
/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
@@ -288,15 +281,6 @@ static struct notifier_block setup_broadcast_notifier = {
.notifier_call = setup_broadcast_cpuhp_notify,
};
-static void auto_demotion_disable(void *dummy)
-{
- unsigned long long msr_bits;
-
- rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
- msr_bits &= ~auto_demotion_disable_flags;
- wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
-}
-
/*
* intel_idle_probe()
*/
@@ -340,17 +324,11 @@ static int intel_idle_probe(void)
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
- auto_demotion_disable_flags =
- (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
break;
case 0x1C: /* 28 - Atom Processor */
- cpuidle_state_table = atom_cstates;
- break;
-
case 0x26: /* 38 - Lincroft Atom Processor */
cpuidle_state_table = atom_cstates;
- auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
break;
case 0x2A: /* SNB */
@@ -458,8 +436,6 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
- if (auto_demotion_disable_flags)
- smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.c b/trunk/drivers/infiniband/hw/nes/nes_hw.c
index 08c194861af5..8b606fd64022 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_hw.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,11 +2610,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
- }
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2644,11 +2642,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
- }
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2707,11 +2703,9 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
- }
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2729,11 +2723,9 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesvnic->of_device_registered) {
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
- }
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
diff --git a/trunk/drivers/infiniband/hw/qib/qib_rc.c b/trunk/drivers/infiniband/hw/qib/qib_rc.c
index eca0c41f1226..8245237b67ce 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_rc.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,8 +1005,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
- (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
@@ -1440,8 +1439,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
}
spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
- goto ack_done;
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/trunk/drivers/input/gameport/gameport.c b/trunk/drivers/input/gameport/gameport.c
index 5b8f59d6c3e8..23cf8fc933ec 100644
--- a/trunk/drivers/input/gameport/gameport.c
+++ b/trunk/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &gameport_event_list);
- queue_work(system_long_wq, &gameport_event_work);
+ schedule_work(&gameport_event_work);
out:
spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c
index 11905b6a3023..7985114beac7 100644
--- a/trunk/drivers/input/input.c
+++ b/trunk/drivers/input/input.c
@@ -75,6 +75,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
* dev->event_lock held and interrupts disabled.
*/
static void input_pass_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
struct input_handler *handler;
@@ -93,6 +94,15 @@ static void input_pass_event(struct input_dev *dev,
continue;
handler = handle->handler;
+
+ /*
+ * If this is the handler that injected this
+ * particular event we want to skip it to avoid
+ * filters firing again and again.
+ */
+ if (handler == src_handler)
+ continue;
+
if (!handler->filter) {
if (filtered)
break;
@@ -122,7 +132,7 @@ static void input_repeat_key(unsigned long data)
if (test_bit(dev->repeat_key, dev->key) &&
is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
- input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
+ input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
if (dev->sync) {
/*
@@ -131,7 +141,7 @@ static void input_repeat_key(unsigned long data)
* Otherwise assume that the driver will send
* SYN_REPORT once it's done.
*/
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
if (dev->rep[REP_PERIOD])
@@ -164,6 +174,7 @@ static void input_stop_autorepeat(struct input_dev *dev)
#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
static int input_handle_abs_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int code, int *pval)
{
bool is_mt_event;
@@ -207,13 +218,15 @@ static int input_handle_abs_event(struct input_dev *dev,
/* Flush pending "slot" event */
if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
- input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
+ input_pass_event(dev, src_handler,
+ EV_ABS, ABS_MT_SLOT, dev->slot);
}
return INPUT_PASS_TO_HANDLERS;
}
static void input_handle_event(struct input_dev *dev,
+ struct input_handler *src_handler,
unsigned int type, unsigned int code, int value)
{
int disposition = INPUT_IGNORE_EVENT;
@@ -266,7 +279,8 @@ static void input_handle_event(struct input_dev *dev,
case EV_ABS:
if (is_event_supported(code, dev->absbit, ABS_MAX))
- disposition = input_handle_abs_event(dev, code, &value);
+ disposition = input_handle_abs_event(dev, src_handler,
+ code, &value);
break;
@@ -324,7 +338,7 @@ static void input_handle_event(struct input_dev *dev,
dev->event(dev, type, code, value);
if (disposition & INPUT_PASS_TO_HANDLERS)
- input_pass_event(dev, type, code, value);
+ input_pass_event(dev, src_handler, type, code, value);
}
/**
@@ -353,7 +367,7 @@ void input_event(struct input_dev *dev,
spin_lock_irqsave(&dev->event_lock, flags);
add_input_randomness(type, code, value);
- input_handle_event(dev, type, code, value);
+ input_handle_event(dev, NULL, type, code, value);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
}
@@ -383,7 +397,8 @@ void input_inject_event(struct input_handle *handle,
rcu_read_lock();
grab = rcu_dereference(dev->grab);
if (!grab || grab == handle)
- input_handle_event(dev, type, code, value);
+ input_handle_event(dev, handle->handler,
+ type, code, value);
rcu_read_unlock();
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -596,10 +611,10 @@ static void input_dev_release_keys(struct input_dev *dev)
for (code = 0; code <= KEY_MAX; code++) {
if (is_event_supported(code, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(code, dev->key)) {
- input_pass_event(dev, EV_KEY, code, 0);
+ input_pass_event(dev, NULL, EV_KEY, code, 0);
}
}
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
}
@@ -874,9 +889,9 @@ int input_set_keycode(struct input_dev *dev,
!is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
__test_and_clear_bit(old_keycode, dev->key)) {
- input_pass_event(dev, EV_KEY, old_keycode, 0);
+ input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
if (dev->sync)
- input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
}
out:
diff --git a/trunk/drivers/input/keyboard/tegra-kbc.c b/trunk/drivers/input/keyboard/tegra-kbc.c
index 99ce9032d08c..ac471b77c18e 100644
--- a/trunk/drivers/input/keyboard/tegra-kbc.c
+++ b/trunk/drivers/input/keyboard/tegra-kbc.c
@@ -71,9 +71,8 @@ struct tegra_kbc {
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
- bool use_fn_map;
const struct tegra_kbc_platform_data *pdata;
- unsigned short keycode[KBC_MAX_KEY * 2];
+ unsigned short keycode[KBC_MAX_KEY];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
struct timer_list timer;
@@ -179,40 +178,6 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(15, 5, KEY_F2),
KEY(15, 6, KEY_CAPSLOCK),
KEY(15, 7, KEY_F6),
-
- /* Software Handled Function Keys */
- KEY(20, 0, KEY_KP7),
-
- KEY(21, 0, KEY_KP9),
- KEY(21, 1, KEY_KP8),
- KEY(21, 2, KEY_KP4),
- KEY(21, 4, KEY_KP1),
-
- KEY(22, 1, KEY_KPSLASH),
- KEY(22, 2, KEY_KP6),
- KEY(22, 3, KEY_KP5),
- KEY(22, 4, KEY_KP3),
- KEY(22, 5, KEY_KP2),
- KEY(22, 7, KEY_KP0),
-
- KEY(27, 1, KEY_KPASTERISK),
- KEY(27, 3, KEY_KPMINUS),
- KEY(27, 4, KEY_KPPLUS),
- KEY(27, 5, KEY_KPDOT),
-
- KEY(28, 5, KEY_VOLUMEUP),
-
- KEY(29, 3, KEY_HOME),
- KEY(29, 4, KEY_END),
- KEY(29, 5, KEY_BRIGHTNESSDOWN),
- KEY(29, 6, KEY_VOLUMEDOWN),
- KEY(29, 7, KEY_BRIGHTNESSUP),
-
- KEY(30, 0, KEY_NUMLOCK),
- KEY(30, 1, KEY_SCROLLLOCK),
- KEY(30, 2, KEY_MUTE),
-
- KEY(31, 4, KEY_HELP),
};
static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -259,7 +224,6 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
unsigned int i;
unsigned int num_down = 0;
unsigned long flags;
- bool fn_keypress = false;
spin_lock_irqsave(&kbc->lock, flags);
for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -273,28 +237,11 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
scancodes[num_down] = scancode;
- keycodes[num_down] = kbc->keycode[scancode];
- /* If driver uses Fn map, do not report the Fn key. */
- if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
- fn_keypress = true;
- else
- num_down++;
+ keycodes[num_down++] = kbc->keycode[scancode];
}
val >>= 8;
}
-
- /*
- * If the platform uses Fn keymaps, translate keys on a Fn keypress.
- * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
- */
- if (fn_keypress) {
- for (i = 0; i < num_down; i++) {
- scancodes[i] += KBC_MAX_KEY;
- keycodes[i] = kbc->keycode[scancodes[i]];
- }
- }
-
spin_unlock_irqrestore(&kbc->lock, flags);
tegra_kbc_report_released_keys(kbc->idev,
@@ -647,11 +594,8 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_dev->keycode = kbc->keycode;
input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = KBC_MAX_KEY;
- if (pdata->use_fn_map)
- input_dev->keycodemax *= 2;
+ input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
- kbc->use_fn_map = pdata->use_fn_map;
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
diff --git a/trunk/drivers/input/misc/rotary_encoder.c b/trunk/drivers/input/misc/rotary_encoder.c
index 7e64d01da2be..1f8e0108962e 100644
--- a/trunk/drivers/input/misc/rotary_encoder.c
+++ b/trunk/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
/* request the IRQs */
err = request_irq(encoder->irq_a, &rotary_encoder_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
DRV_NAME, encoder);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
}
err = request_irq(encoder->irq_b, &rotary_encoder_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
DRV_NAME, encoder);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/trunk/drivers/input/mouse/synaptics.h b/trunk/drivers/input/mouse/synaptics.h
index 7453938bf5ef..25e5d042a72c 100644
--- a/trunk/drivers/input/mouse/synaptics.h
+++ b/trunk/drivers/input/mouse/synaptics.h
@@ -51,29 +51,6 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
-
-/*
- * The following describes response for the 0x0c query.
- *
- * byte mask name meaning
- * ---- ---- ------- ------------
- * 1 0x01 adjustable threshold capacitive button sensitivity
- * can be adjusted
- * 1 0x02 report max query 0x0d gives max coord reported
- * 1 0x04 clearpad sensor is ClearPad product
- * 1 0x08 advanced gesture not particularly meaningful
- * 1 0x10 clickpad bit 0 1-button ClickPad
- * 1 0x60 multifinger mode identifies firmware finger counting
- * (not reporting!) algorithm.
- * Not particularly meaningful
- * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
- * 2 0x01 clickpad bit 1 2-button ClickPad
- * 2 0x02 deluxe LED controls touchpad support LED commands
- * ala multimedia control bar
- * 2 0x04 reduced filtering firmware does less filtering on
- * position data, driver should watch
- * for noise.
- */
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/trunk/drivers/input/serio/serio.c b/trunk/drivers/input/serio/serio.c
index ba70058e2be3..db5b0bca1a1a 100644
--- a/trunk/drivers/input/serio/serio.c
+++ b/trunk/drivers/input/serio/serio.c
@@ -188,8 +188,7 @@ static void serio_free_event(struct serio_event *event)
kfree(event);
}
-static void serio_remove_duplicate_events(void *object,
- enum serio_event_type type)
+static void serio_remove_duplicate_events(struct serio_event *event)
{
struct serio_event *e, *next;
unsigned long flags;
@@ -197,13 +196,13 @@ static void serio_remove_duplicate_events(void *object,
spin_lock_irqsave(&serio_event_lock, flags);
list_for_each_entry_safe(e, next, &serio_event_list, node) {
- if (object == e->object) {
+ if (event->object == e->object) {
/*
* If this event is of different type we should not
* look further - we only suppress duplicate events
* that were sent back-to-back.
*/
- if (type != e->type)
+ if (event->type != e->type)
break;
list_del_init(&e->node);
@@ -246,7 +245,7 @@ static void serio_handle_event(struct work_struct *work)
break;
}
- serio_remove_duplicate_events(event->object, event->type);
+ serio_remove_duplicate_events(event);
serio_free_event(event);
}
@@ -299,7 +298,7 @@ static int serio_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &serio_event_list);
- queue_work(system_long_wq, &serio_event_work);
+ schedule_work(&serio_event_work);
out:
spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -437,12 +436,10 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
} else if (!strncmp(buf, "rescan", count)) {
serio_disconnect_port(serio);
serio_find_driver(serio);
- serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
serio_disconnect_port(serio);
error = serio_bind_driver(serio, to_serio_driver(drv));
put_driver(drv);
- serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
} else {
error = -EINVAL;
}
diff --git a/trunk/drivers/input/tablet/wacom_sys.c b/trunk/drivers/input/tablet/wacom_sys.c
index cf8fb9f5d4a8..fc381498b798 100644
--- a/trunk/drivers/input/tablet/wacom_sys.c
+++ b/trunk/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
/* Retrieve the physical and logical size for OEM devices */
error = wacom_retrieve_hid_descriptor(intf, features);
if (error)
- goto fail3;
+ goto fail2;
wacom_setup_device_quirks(features);
diff --git a/trunk/drivers/input/touchscreen/ads7846.c b/trunk/drivers/input/touchscreen/ads7846.c
index 4bf2316e3284..14ea54b78e46 100644
--- a/trunk/drivers/input/touchscreen/ads7846.c
+++ b/trunk/drivers/input/touchscreen/ads7846.c
@@ -941,30 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
struct ads7846_platform_data *pdata = spi->dev.platform_data;
int err;
- /*
- * REVISIT when the irq can be triggered active-low, or if for some
+ /* REVISIT when the irq can be triggered active-low, or if for some
* reason the touchscreen isn't hooked up, we don't need to access
* the pendown state.
*/
+ if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
+ dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+ return -EINVAL;
+ }
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
- } else if (gpio_is_valid(pdata->gpio_pendown)) {
-
- err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
- if (err) {
- dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
- pdata->gpio_pendown);
- return err;
- }
-
- ts->gpio_pendown = pdata->gpio_pendown;
+ return 0;
+ }
- } else {
- dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
- return -EINVAL;
+ err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+ if (err) {
+ dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+ pdata->gpio_pendown);
+ return err;
}
+ ts->gpio_pendown = pdata->gpio_pendown;
+
return 0;
}
@@ -1354,7 +1353,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
err_put_regulator:
regulator_put(ts->reg);
err_free_gpio:
- if (!ts->get_pendown_state)
+ if (ts->gpio_pendown != -1)
gpio_free(ts->gpio_pendown);
err_cleanup_filter:
if (ts->filter_cleanup)
@@ -1384,13 +1383,8 @@ static int __devexit ads7846_remove(struct spi_device *spi)
regulator_disable(ts->reg);
regulator_put(ts->reg);
- if (!ts->get_pendown_state) {
- /*
- * If we are not using specialized pendown method we must
- * have been relying on gpio we set up ourselves.
- */
+ if (ts->gpio_pendown != -1)
gpio_free(ts->gpio_pendown);
- }
if (ts->filter_cleanup)
ts->filter_cleanup(ts->filter_data);
diff --git a/trunk/drivers/input/touchscreen/wacom_w8001.c b/trunk/drivers/input/touchscreen/wacom_w8001.c
index c14412ef4648..5cb8449c909d 100644
--- a/trunk/drivers/input/touchscreen/wacom_w8001.c
+++ b/trunk/drivers/input/touchscreen/wacom_w8001.c
@@ -51,10 +51,6 @@ MODULE_LICENSE("GPL");
#define W8001_PKTLEN_TPCCTL 11 /* control packet */
#define W8001_PKTLEN_TOUCH2FG 13
-/* resolution in points/mm */
-#define W8001_PEN_RESOLUTION 100
-#define W8001_TOUCH_RESOLUTION 10
-
struct w8001_coord {
u8 rdy;
u8 tsw;
@@ -202,7 +198,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
query->y = 1024;
if (query->panel_res)
query->x = query->y = (1 << query->panel_res);
- query->panel_res = W8001_TOUCH_RESOLUTION;
+ query->panel_res = 10;
}
}
@@ -398,8 +394,6 @@ static int w8001_setup(struct w8001 *w8001)
input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
- input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
- input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
if (coord.tilt_x && coord.tilt_y) {
input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -424,17 +418,14 @@ static int w8001_setup(struct w8001 *w8001)
w8001->max_touch_x = touch.x;
w8001->max_touch_y = touch.y;
+ /* scale to pen maximum */
if (w8001->max_pen_x && w8001->max_pen_y) {
- /* if pen is supported scale to pen maximum */
touch.x = w8001->max_pen_x;
touch.y = w8001->max_pen_y;
- touch.panel_res = W8001_PEN_RESOLUTION;
}
input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
- input_abs_set_res(dev, ABS_X, touch.panel_res);
- input_abs_set_res(dev, ABS_Y, touch.panel_res);
switch (touch.sensor_id) {
case 0:
diff --git a/trunk/drivers/isdn/hardware/eicon/istream.c b/trunk/drivers/isdn/hardware/eicon/istream.c
index 7bd5baa547be..18f8798442fa 100644
--- a/trunk/drivers/isdn/hardware/eicon/istream.c
+++ b/trunk/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
stream interface.
If synchronous service was requested, then function
does return amount of data written to stream.
- 'final' does indicate that piece of data to be written is
+ 'final' does indicate that pice of data to be written is
final part of frame (necessary only by structured datatransfer)
return 0 if zero lengh packet was written
return -1 if stream is full
diff --git a/trunk/drivers/isdn/hisax/isdnl2.c b/trunk/drivers/isdn/hisax/isdnl2.c
index cfff0c41d298..0858791978d8 100644
--- a/trunk/drivers/isdn/hisax/isdnl2.c
+++ b/trunk/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb;
+ struct sk_buff *skb, *oskb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
- int i, hdr_space_needed;
+ int i;
int unsigned p1;
u_long flags;
@@ -1261,16 +1261,6 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
if (!skb)
return;
- hdr_space_needed = l2headersize(l2, 0);
- if (hdr_space_needed > skb_headroom(skb)) {
- struct sk_buff *orig_skb = skb;
-
- skb = skb_realloc_headroom(skb, hdr_space_needed);
- if (!skb) {
- dev_kfree_skb(orig_skb);
- return;
- }
- }
spin_lock_irqsave(&l2->lock, flags);
if(test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
@@ -1295,7 +1285,19 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- memcpy(skb_push(skb, i), header, i);
+ p1 = skb->data - skb->head;
+ if (p1 >= i)
+ memcpy(skb_push(skb, i), header, i);
+ else {
+ printk(KERN_WARNING
+ "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
+ oskb = skb;
+ skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
+ memcpy(skb_put(skb, i), header, i);
+ skb_copy_from_linear_data(oskb,
+ skb_put(skb, oskb->len), oskb->len);
+ dev_kfree_skb(oskb);
+ }
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/trunk/drivers/isdn/hysdn/hysdn_defs.h b/trunk/drivers/isdn/hysdn/hysdn_defs.h
index 18b801ad97a4..729df4089385 100644
--- a/trunk/drivers/isdn/hysdn/hysdn_defs.h
+++ b/trunk/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,6 +227,7 @@ extern hysdn_card *card_root; /* pointer to first card */
/*************************/
/* im/exported functions */
/*************************/
+extern char *hysdn_getrev(const char *);
/* hysdn_procconf.c */
extern int hysdn_procconf_init(void); /* init proc config filesys */
@@ -258,6 +259,7 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
/* hysdn_net.c */
extern unsigned int hynet_enable;
+extern char *hysdn_net_revision;
extern int hysdn_net_create(hysdn_card *); /* create a new net device */
extern int hysdn_net_release(hysdn_card *); /* delete the device */
extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
diff --git a/trunk/drivers/isdn/hysdn/hysdn_init.c b/trunk/drivers/isdn/hysdn/hysdn_init.c
index 0ab42ace1692..b7cc5c2f08c6 100644
--- a/trunk/drivers/isdn/hysdn/hysdn_init.c
+++ b/trunk/drivers/isdn/hysdn/hysdn_init.c
@@ -36,6 +36,7 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
MODULE_AUTHOR("Werner Cornelius");
MODULE_LICENSE("GPL");
+static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
static int cardmax; /* number of found cards */
hysdn_card *card_root = NULL; /* pointer to first card */
static hysdn_card *card_last = NULL; /* pointer to first card */
@@ -48,6 +49,25 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
/* Additionally newer versions may be activated without rebooting. */
/****************************************************************************/
+/******************************************************/
+/* extract revision number from string for log output */
+/******************************************************/
+char *
+hysdn_getrev(const char *revision)
+{
+ char *rev;
+ char *p;
+
+ if ((p = strchr(revision, ':'))) {
+ rev = p + 2;
+ p = strchr(rev, '$');
+ *--p = 0;
+ } else
+ rev = "???";
+ return rev;
+}
+
+
/****************************************************************************/
/* init_module is called once when the module is loaded to do all necessary */
/* things like autodetect... */
@@ -155,9 +175,13 @@ static int hysdn_have_procfs;
static int __init
hysdn_init(void)
{
+ char tmp[50];
int rc;
- printk(KERN_NOTICE "HYSDN: module loaded\n");
+ strcpy(tmp, hysdn_init_revision);
+ printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
+ strcpy(tmp, hysdn_net_revision);
+ printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
rc = pci_register_driver(&hysdn_pci_driver);
if (rc)
diff --git a/trunk/drivers/isdn/hysdn/hysdn_net.c b/trunk/drivers/isdn/hysdn/hysdn_net.c
index 11f2cce26005..feec8d89d719 100644
--- a/trunk/drivers/isdn/hysdn/hysdn_net.c
+++ b/trunk/drivers/isdn/hysdn/hysdn_net.c
@@ -26,6 +26,9 @@
unsigned int hynet_enable = 0xffffffff;
module_param(hynet_enable, uint, 0);
+/* store the actual version for log reporting */
+char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
+
#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
/****************************************************************************/
diff --git a/trunk/drivers/isdn/hysdn/hysdn_procconf.c b/trunk/drivers/isdn/hysdn/hysdn_procconf.c
index 5fe83bd42061..96b3e39c3356 100644
--- a/trunk/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/trunk/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,6 +23,7 @@
#include "hysdn_defs.h"
static DEFINE_MUTEX(hysdn_conf_mutex);
+static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
#define INFO_OUT_LEN 80 /* length of info line including lf */
@@ -403,7 +404,7 @@ hysdn_procconf_init(void)
card = card->next; /* next entry */
}
- printk(KERN_NOTICE "HYSDN: procfs initialised\n");
+ printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
return (0);
} /* hysdn_procconf_init */
diff --git a/trunk/drivers/md/linear.c b/trunk/drivers/md/linear.c
index 0ed7f6bc2a7f..8a2f767f26d8 100644
--- a/trunk/drivers/md/linear.c
+++ b/trunk/drivers/md/linear.c
@@ -216,6 +216,7 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c
index 818313e277e7..b76cfc89e1b5 100644
--- a/trunk/drivers/md/md.c
+++ b/trunk/drivers/md/md.c
@@ -287,7 +287,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
mddev_t *mddev = q->queuedata;
int rv;
int cpu;
- unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
@@ -312,16 +311,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
atomic_inc(&mddev->active_io);
rcu_read_unlock();
- /*
- * save the sectors now since our bio can
- * go away inside make_request
- */
- sectors = bio_sectors(bio);
rv = mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -553,9 +548,6 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
- if (unit && MAJOR(unit) != MD_MAJOR)
- unit &= ~((1<<MdpMinorShift)-1);
rdev->bdev = bdev;
return err;
}
@@ -2471,9 +2465,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev->raid_disk != -1)
return -EBUSY;
- if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
- return -EBUSY;
-
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
@@ -2619,11 +2610,12 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
mddev_lock(mddev);
list_for_each_entry(rdev2, &mddev->disks, same_set)
- if (rdev->bdev == rdev2->bdev &&
- rdev != rdev2 &&
- overlaps(rdev->data_offset, rdev->sectors,
- rdev2->data_offset,
- rdev2->sectors)) {
+ if (test_bit(AllReserved, &rdev2->flags) ||
+ (rdev->bdev == rdev2->bdev &&
+ rdev != rdev2 &&
+ overlaps(rdev->data_offset, rdev->sectors,
+ rdev2->data_offset,
+ rdev2->sectors))) {
overlap = 1;
break;
}
@@ -4141,10 +4133,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- if (mddev->pers) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ if (mddev->pers)
revalidate_disk(mddev->gendisk);
- }
+
return len;
}
@@ -4627,7 +4619,6 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
- mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4716,7 +4707,6 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
- mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4832,7 +4822,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
- mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -5589,8 +5578,6 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
mddev->delta_disks = raid_disks - mddev->raid_disks;
rv = mddev->pers->check_reshape(mddev);
- if (rv < 0)
- mddev->delta_disks = 0;
return rv;
}
@@ -6017,7 +6004,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
+ check_disk_size_change(mddev->gendisk, bdev);
out:
return err;
}
@@ -6032,21 +6019,6 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-
-static int md_media_changed(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- mddev->changed = 0;
- return 0;
-}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6057,8 +6029,6 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -7015,6 +6985,9 @@ void md_do_sync(mddev_t *mddev)
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ mddev->curr_resync_completed = 0;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -7055,7 +7028,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
}
- if (mddev->degraded && !mddev->recovery_disabled) {
+ if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
@@ -7178,20 +7151,7 @@ void md_check_recovery(mddev_t *mddev)
/* Only thing we do on a ro array is remove
* failed devices.
*/
- mdk_rdev_t *rdev;
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0 &&
- !test_bit(Blocked, &rdev->flags) &&
- test_bit(Faulty, &rdev->flags) &&
- atomic_read(&rdev->nr_pending)==0) {
- if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- rdev->raid_disk = -1;
- }
- }
+ remove_and_add_spares(mddev);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
diff --git a/trunk/drivers/md/md.h b/trunk/drivers/md/md.h
index 12215d437fcc..eec517ced31a 100644
--- a/trunk/drivers/md/md.h
+++ b/trunk/drivers/md/md.h
@@ -93,6 +93,8 @@ struct mdk_rdev_s
#define Faulty 1 /* device is known to have a fault */
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
+#define AllReserved 6 /* If whole device is reserved for
+ * one array */
#define AutoDetected 7 /* added by auto-detect */
#define Blocked 8 /* An error occured on an externally
* managed array, don't allow writes
@@ -274,8 +276,6 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
- int changed; /* True if we might need to
- * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/trunk/drivers/md/multipath.c b/trunk/drivers/md/multipath.c
index 3a62d440e27b..6d7ddf32ef2e 100644
--- a/trunk/drivers/md/multipath.c
+++ b/trunk/drivers/md/multipath.c
@@ -435,6 +435,7 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/trunk/drivers/md/raid0.c b/trunk/drivers/md/raid0.c
index c0ac457f1218..a39f4c355e55 100644
--- a/trunk/drivers/md/raid0.c
+++ b/trunk/drivers/md/raid0.c
@@ -179,14 +179,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
rdev1->new_raid_disk = j;
}
- if (mddev->level == 1) {
- /* taiking over a raid1 array-
- * we have only one active disk
- */
- j = 0;
- rdev1->new_raid_disk = j;
- }
-
if (j < 0 || j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
"aborting!\n", mdname(mddev), j);
@@ -361,6 +353,7 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -651,39 +644,12 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
return priv_conf;
}
-static void *raid0_takeover_raid1(mddev_t *mddev)
-{
- raid0_conf_t *priv_conf;
-
- /* Check layout:
- * - (N - 1) mirror drives must be already faulty
- */
- if ((mddev->raid_disks - 1) != mddev->degraded) {
- printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
- mdname(mddev));
- return ERR_PTR(-EINVAL);
- }
-
- /* Set new parameters */
- mddev->new_level = 0;
- mddev->new_layout = 0;
- mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
- mddev->delta_disks = 1 - mddev->raid_disks;
- mddev->raid_disks = 1;
- /* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
-
- create_strip_zones(mddev, &priv_conf);
- return priv_conf;
-}
-
static void *raid0_takeover(mddev_t *mddev)
{
/* raid0 can take over:
* raid4 - if all data disks are active.
* raid5 - providing it is Raid4 layout and one disk is faulty
* raid10 - assuming we have all necessary active disks
- * raid1 - with (N -1) mirror drives faulty
*/
if (mddev->level == 4)
return raid0_takeover_raid45(mddev);
@@ -699,12 +665,6 @@ static void *raid0_takeover(mddev_t *mddev)
if (mddev->level == 10)
return raid0_takeover_raid10(mddev);
- if (mddev->level == 1)
- return raid0_takeover_raid1(mddev);
-
- printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
- mddev->level);
-
return ERR_PTR(-EINVAL);
}
diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c
index 06cd712807d0..a23ffa397ba9 100644
--- a/trunk/drivers/md/raid1.c
+++ b/trunk/drivers/md/raid1.c
@@ -593,10 +593,7 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- /* Only take the spinlock to quiet a warning */
- spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
- spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -962,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device_unlocked(mddev->queue);
+ blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2024,6 +2021,7 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
+ mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c
index 747d061d8e05..69b659544390 100644
--- a/trunk/drivers/md/raid10.c
+++ b/trunk/drivers/md/raid10.c
@@ -662,10 +662,7 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- /* Spinlock only taken to quiet a warning */
- spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
- spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -974,7 +971,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device_unlocked(mddev->queue);
+ blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -2307,6 +2304,8 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
+ mddev->queue->queue_lock = &conf->device_lock;
+
mddev->thread = conf->thread;
conf->thread = NULL;
@@ -2464,13 +2463,11 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
conf = setup_conf(mddev);
- if (!IS_ERR(conf)) {
+ if (!IS_ERR(conf))
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0)
rdev->new_raid_disk = rdev->raid_disk * 2;
- conf->barrier = 1;
- }
-
+
return conf;
}
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index 78536fdbd87f..5044babfcda0 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -5204,6 +5204,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
+ mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
@@ -5516,6 +5517,7 @@ static int raid5_start_reshape(mddev_t *mddev)
raid5_conf_t *conf = mddev->private;
mdk_rdev_t *rdev;
int spares = 0;
+ int added_devices = 0;
unsigned long flags;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5525,8 +5527,8 @@ static int raid5_start_reshape(mddev_t *mddev)
return -ENOSPC;
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (!test_bit(In_sync, &rdev->flags)
- && !test_bit(Faulty, &rdev->flags))
+ if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
+ && !test_bit(Faulty, &rdev->flags))
spares++;
if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5569,35 +5571,34 @@ static int raid5_start_reshape(mddev_t *mddev)
* to correctly record the "partially reconstructed" state of
* such devices during the reshape and confusion could result.
*/
- if (mddev->delta_disks >= 0) {
- int added_devices = 0;
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk < 0 &&
- !test_bit(Faulty, &rdev->flags)) {
- if (raid5_add_disk(mddev, rdev) == 0) {
- char nm[20];
- if (rdev->raid_disk
- >= conf->previous_raid_disks) {
- set_bit(In_sync, &rdev->flags);
- added_devices++;
- } else
- rdev->recovery_offset = 0;
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
- /* Failure here is OK */;
- }
- } else if (rdev->raid_disk >= conf->previous_raid_disks
- && !test_bit(Faulty, &rdev->flags)) {
- /* This is a spare that was manually added */
- set_bit(In_sync, &rdev->flags);
- added_devices++;
- }
+ if (mddev->delta_disks >= 0)
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk < 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ if (raid5_add_disk(mddev, rdev) == 0) {
+ char nm[20];
+ if (rdev->raid_disk >= conf->previous_raid_disks) {
+ set_bit(In_sync, &rdev->flags);
+ added_devices++;
+ } else
+ rdev->recovery_offset = 0;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ if (sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm))
+ /* Failure here is OK */;
+ } else
+ break;
+ } else if (rdev->raid_disk >= conf->previous_raid_disks
+ && !test_bit(Faulty, &rdev->flags)) {
+ /* This is a spare that was manually added */
+ set_bit(In_sync, &rdev->flags);
+ added_devices++;
+ }
- /* When a reshape changes the number of devices,
- * ->degraded is measured against the larger of the
- * pre and post number of devices.
- */
+ /* When a reshape changes the number of devices, ->degraded
+ * is measured against the larger of the pre and post number of
+ * devices.*/
+ if (mddev->delta_disks > 0) {
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- added_devices;
diff --git a/trunk/drivers/media/common/tuners/tda8290.c b/trunk/drivers/media/common/tuners/tda8290.c
index 8c4852114eeb..bc6a67768af1 100644
--- a/trunk/drivers/media/common/tuners/tda8290.c
+++ b/trunk/drivers/media/common/tuners/tda8290.c
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
#define TDA8290_ID 0x89
u8 reg = 0x1f, id;
struct i2c_msg msg_read[] = {
- { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8290 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
- printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
#define TDA8295C2_ID 0x8b
u8 reg = 0x2f, id;
struct i2c_msg msg_read[] = {
- { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
- { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
- /* detect tda8295 */
+ /* detect tda8290 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
- printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
+ printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
diff --git a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 193cdb77b76a..defd83964ce2 100644
--- a/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/trunk/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -870,23 +870,6 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
- u16 pid, int onoff)
-{
- struct dib0700_state *st = adapter->dev->priv;
- if (st->is_dib7000pc)
- return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
- return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
-}
-
-static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
-{
- struct dib0700_state *st = adapter->dev->priv;
- if (st->is_dib7000pc)
- return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
- return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
-}
-
static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
{
return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1892,8 +1875,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
- .pid_filter = stk7700p_pid_filter,
- .pid_filter_ctrl = stk7700p_pid_filter_ctrl,
+ .pid_filter = stk70x0p_pid_filter,
+ .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
.frontend_attach = stk7700p_frontend_attach,
.tuner_attach = stk7700p_tuner_attach,
diff --git a/trunk/drivers/media/dvb/dvb-usb/lmedm04.c b/trunk/drivers/media/dvb/dvb-usb/lmedm04.c
index 46ccd01a7696..9eea4188303b 100644
--- a/trunk/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/trunk/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
}
/* Default firmware for LME2510C */
-char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
static void lme_coldreset(struct usb_device *dev)
{
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.download_firmware = lme2510_download_firmware,
- .firmware = (const char *)&lme_firmware,
+ .firmware = lme_firmware,
.size_of_priv = sizeof(struct lme2510_state),
.num_adapters = 1,
.adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
MODULE_AUTHOR("Malcolm Priestley ");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.75");
+MODULE_VERSION("1.74");
MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/media/dvb/frontends/dib7000m.c b/trunk/drivers/media/dvb/frontends/dib7000m.c
index 289a79837f24..c7f5ccf54aa5 100644
--- a/trunk/drivers/media/dvb/frontends/dib7000m.c
+++ b/trunk/drivers/media/dvb/frontends/dib7000m.c
@@ -1285,25 +1285,6 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
}
EXPORT_SYMBOL(dib7000m_get_i2c_master);
-int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
-{
- struct dib7000m_state *state = fe->demodulator_priv;
- u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
- val |= (onoff & 0x1) << 4;
- dprintk("PID filter enabled %d", onoff);
- return dib7000m_write_word(state, 294 + state->reg_offs, val);
-}
-EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
-
-int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
-{
- struct dib7000m_state *state = fe->demodulator_priv;
- dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
- return dib7000m_write_word(state, 300 + state->reg_offs + id,
- onoff ? (1 << 13) | pid : 0);
-}
-EXPORT_SYMBOL(dib7000m_pid_filter);
-
#if 0
/* used with some prototype boards */
int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
diff --git a/trunk/drivers/media/dvb/frontends/dib7000m.h b/trunk/drivers/media/dvb/frontends/dib7000m.h
index 81fcf2241c64..113819ce9f0d 100644
--- a/trunk/drivers/media/dvb/frontends/dib7000m.h
+++ b/trunk/drivers/media/dvb/frontends/dib7000m.h
@@ -46,8 +46,6 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
enum dibx000_i2c_interface,
int);
-extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
-extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
#else
static inline
struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -65,19 +63,6 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
- u16 pid, u8 onoff)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
-
-static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
- uint8_t onoff)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
#endif
/* TODO
diff --git a/trunk/drivers/media/dvb/mantis/mantis_pci.c b/trunk/drivers/media/dvb/mantis/mantis_pci.c
index 10a432a79d00..59feeb84aec7 100644
--- a/trunk/drivers/media/dvb/mantis/mantis_pci.c
+++ b/trunk/drivers/media/dvb/mantis/mantis_pci.c
@@ -22,6 +22,7 @@
#include
#include
#include
+#include
#include
#include
#include