diff --git a/[refs] b/[refs]
index 6bac56dee8f2..70e38fe8b808 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: b6f7e38dbb310557fe890b04b1a376c93f638c3b
+refs/heads/master: bc83cccc761953f878088cdfa682de0970b5561f
diff --git a/trunk/CREDITS b/trunk/CREDITS
index 41d8e63d5165..72b487869788 100644
--- a/trunk/CREDITS
+++ b/trunk/CREDITS
@@ -3554,12 +3554,12 @@ E: cvance@nai.com
D: portions of the Linux Security Module (LSM) framework and security modules
N: Petr Vandrovec
-E: petr@vandrovec.name
+E: vandrove@vc.cvut.cz
D: Small contributions to ncpfs
D: Matrox framebuffer driver
-S: 21513 Conradia Ct
-S: Cupertino, CA 95014
-S: USA
+S: Chudenicka 8
+S: 10200 Prague 10, Hostivar
+S: Czech Republic
N: Thibaut Varene
E: T-Bone@parisc-linux.org
diff --git a/trunk/Documentation/DocBook/kernel-locking.tmpl b/trunk/Documentation/DocBook/kernel-locking.tmpl
index f66f4df18690..a0d479d1e1dd 100644
--- a/trunk/Documentation/DocBook/kernel-locking.tmpl
+++ b/trunk/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,9 +1645,7 @@ the amount of locking which needs to be done.
all the readers who were traversing the list when we deleted the
element are finished. We use call_rcu() to
register a callback which will actually destroy the object once
- all pre-existing readers are finished. Alternatively,
- synchronize_rcu() may be used to block until
- all pre-existing are finished.
+ the readers are finished.
But how does Read Copy Update know when the readers are
@@ -1716,7 +1714,7 @@ the amount of locking which needs to be done.
- object_put(obj);
+ list_del_rcu(&obj->list);
cache_num--;
-+ call_rcu(&obj->rcu, cache_delete_rcu);
++ call_rcu(&obj->rcu, cache_delete_rcu, obj);
}
/* Must be holding cache_lock */
@@ -1727,6 +1725,14 @@ the amount of locking which needs to be done.
if (++cache_num > MAX_CACHE_SIZE) {
struct object *i, *outcast = NULL;
list_for_each_entry(i, &cache, list) {
+@@ -85,6 +94,7 @@
+ obj->popularity = 0;
+ atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+ spin_lock_init(&obj->lock);
++ INIT_RCU_HEAD(&obj->rcu);
+
+ spin_lock_irqsave(&cache_lock, flags);
+ __cache_add(obj);
@@ -104,12 +114,11 @@
struct object *cache_find(int id)
{
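
As a hedged aside on the pattern these kernel-locking.tmpl hunks restore: the
three-argument call_rcu() shown above is the signature of this document's
kernel vintage (later kernels take only the rcu_head and the callback, and
the callback recovers the object with container_of()). A minimal sketch using
the template's own example names (struct object, cache_lock, cache_num,
cache_delete_rcu):

    #include <linux/rcupdate.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct object {
            struct list_head list;
            struct rcu_head rcu;
            int id;
    };

    static int cache_num;

    static void cache_delete_rcu(void *arg)
    {
            struct object *obj = arg;

            kfree(obj);     /* safe: all pre-existing readers have finished */
    }

    /* Must be holding cache_lock */
    static void __cache_delete(struct object *obj)
    {
            list_del_rcu(&obj->list);       /* readers may still see obj */
            cache_num--;
            call_rcu(&obj->rcu, cache_delete_rcu, obj); /* free after grace period */
    }
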
diff --git a/trunk/Documentation/RCU/checklist.txt b/trunk/Documentation/RCU/checklist.txt
index 0c134f8afc6f..790d1a812376 100644
--- a/trunk/Documentation/RCU/checklist.txt
+++ b/trunk/Documentation/RCU/checklist.txt
@@ -218,22 +218,13 @@ over a rather long period of time, but improvements are always welcome!
include:
a. Keeping a count of the number of data-structure elements
- used by the RCU-protected data structure, including
- those waiting for a grace period to elapse. Enforce a
- limit on this number, stalling updates as needed to allow
- previously deferred frees to complete. Alternatively,
- limit only the number awaiting deferred free rather than
- the total number of elements.
-
- One way to stall the updates is to acquire the update-side
- mutex. (Don't try this with a spinlock -- other CPUs
- spinning on the lock could prevent the grace period
- from ever ending.) Another way to stall the updates
- is for the updates to use a wrapper function around
- the memory allocator, so that this wrapper function
- simulates OOM when there is too much memory awaiting an
- RCU grace period. There are of course many other
- variations on this theme.
+ used by the RCU-protected data structure, including those
+ waiting for a grace period to elapse. Enforce a limit
+ on this number, stalling updates as needed to allow
+ previously deferred frees to complete.
+
+ Alternatively, limit only the number awaiting deferred
+ free rather than the total number of elements.
b. Limiting update rate. For example, if updates occur only
once per hour, then no explicit rate limiting is required,
@@ -374,26 +365,3 @@ over a rather long period of time, but improvements are always welcome!
and the compiler to freely reorder code into and out of RCU
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
-
-17. Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
- the __rcu sparse checks to validate your RCU code. These
- can help find problems as follows:
-
- CONFIG_PROVE_RCU: check that accesses to RCU-protected data
- structures are carried out under the proper RCU
- read-side critical section, while holding the right
- combination of locks, or whatever other conditions
- are appropriate.
-
- CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
- same object to call_rcu() (or friends) before an RCU
- grace period has elapsed since the last time that you
- passed that same object to call_rcu() (or friends).
-
- __rcu sparse checks: tag the pointer to the RCU-protected data
- structure with __rcu, and sparse will warn you if you
- access that pointer without the services of one of the
- variants of rcu_dereference().
-
- These debugging aids can help you find problems that are
- otherwise extremely difficult to spot.
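
As a hedged illustration of item (a) above -- capping how many elements are
awaiting a grace period -- one possible shape, using the two-argument
call_rcu(); struct elem, pending_frees and MAX_PENDING are illustrative
names, not kernel API:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct elem {
            struct rcu_head rcu;
            /* ... payload ... */
    };

    static atomic_t pending_frees = ATOMIC_INIT(0);
    #define MAX_PENDING 1000                /* illustrative limit */

    static void elem_free_rcu(struct rcu_head *head)
    {
            struct elem *e = container_of(head, struct elem, rcu);

            kfree(e);
            atomic_dec(&pending_frees);
    }

    static void elem_delete(struct elem *e)
    {
            /* Stall the updater until deferred frees drain below the cap. */
            while (atomic_read(&pending_frees) >= MAX_PENDING)
                    synchronize_rcu();
            atomic_inc(&pending_frees);
            call_rcu(&e->rcu, elem_free_rcu);
    }
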
diff --git a/trunk/Documentation/RCU/stallwarn.txt b/trunk/Documentation/RCU/stallwarn.txt
index 862c08ef1fde..44c6dcc93d6d 100644
--- a/trunk/Documentation/RCU/stallwarn.txt
+++ b/trunk/Documentation/RCU/stallwarn.txt
@@ -80,24 +80,6 @@ o A CPU looping with bottom halves disabled. This condition can
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule().
-o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
- happen to preempt a low-priority task in the middle of an RCU
- read-side critical section. This is especially damaging if
- that low-priority task is not permitted to run on any other CPU,
- in which case the next RCU grace period can never complete, which
- will eventually cause the system to run out of memory and hang.
- While the system is in the process of running itself out of
- memory, you might see stall-warning messages.
-
-o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
- is running at a higher priority than the RCU softirq threads.
- This will prevent RCU callbacks from ever being invoked,
- and in a CONFIG_TREE_PREEMPT_RCU kernel will further prevent
- RCU grace periods from ever completing. Either way, the
- system will eventually run out of memory and hang. In the
- CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
- messages.
-
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
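
For the !CONFIG_PREEMPT looping case above, a hedged sketch of the
conventional fix: calling cond_resched() in the loop gives RCU the quiescent
state it needs. drain_queue(), todo_pending() and process_item() are
illustrative names:

    #include <linux/sched.h>

    extern int todo_pending(void);
    extern void process_item(void);

    static void drain_queue(void)
    {
            while (todo_pending()) {
                    process_item();
                    /* Without this, a long loop on a !CONFIG_PREEMPT kernel
                     * never schedules and can trigger the stall warnings
                     * described in this file. */
                    cond_resched();
            }
    }
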
diff --git a/trunk/Documentation/RCU/trace.txt b/trunk/Documentation/RCU/trace.txt
index a851118775d8..efd8cc95c06b 100644
--- a/trunk/Documentation/RCU/trace.txt
+++ b/trunk/Documentation/RCU/trace.txt
@@ -125,17 +125,6 @@ o "b" is the batch limit for this CPU. If more than this number
of RCU callbacks is ready to invoke, then the remainder will
be deferred.
-o "ci" is the number of RCU callbacks that have been invoked for
- this CPU. Note that ci+ql is the number of callbacks that have
- been registered in absence of CPU-hotplug activity.
-
-o "co" is the number of RCU callbacks that have been orphaned due to
- this CPU going offline.
-
-o "ca" is the number of RCU callbacks that have been adopted due to
- other CPUs going offline. Note that ci+co-ca+ql is the number of
- RCU callbacks registered on this CPU.
-
There is also an rcu/rcudata.csv file with the same information in
comma-separated-variable spreadsheet format.
@@ -191,7 +180,7 @@ o "s" is the "signaled" state that drives force_quiescent_state()'s
o "jfq" is the number of jiffies remaining for this grace period
before force_quiescent_state() is invoked to help push things
- along. Note that CPUs in dyntick-idle mode throughout the grace
+ along. Note that CPUs in dyntick-idle mode thoughout the grace
period will not report on their own, but rather must be checked by
some other CPU via force_quiescent_state().
diff --git a/trunk/Documentation/cputopology.txt b/trunk/Documentation/cputopology.txt
index 902d3151f527..f1c5c4bccd3e 100644
--- a/trunk/Documentation/cputopology.txt
+++ b/trunk/Documentation/cputopology.txt
@@ -14,39 +14,25 @@ to /proc/cpuinfo.
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-3) /sys/devices/system/cpu/cpuX/topology/book_id:
-
- the book ID of cpuX. Typically it is the hardware platform's
- identifier (rather than the kernel's). The actual value is
- architecture and platform dependent.
-
-4) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
+3) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
internal kernel map of cpuX's hardware threads within the same
core as cpuX
-5) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+4) /sys/devices/system/cpu/cpuX/topology/core_siblings:
internal kernel map of cpuX's hardware threads within the same
physical_package_id.
-6) /sys/devices/system/cpu/cpuX/topology/book_siblings:
-
- internal kernel map of cpuX's hardware threads within the same
- book_id.
-
To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 4 or 6 attributes. The two book
-related sysfs files will only be created if CONFIG_SCHED_BOOK is selected.
+drivers/base/topology.c, is to export the 4 attributes.
For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h:
#define topology_physical_package_id(cpu)
#define topology_core_id(cpu)
-#define topology_book_id(cpu)
#define topology_thread_cpumask(cpu)
#define topology_core_cpumask(cpu)
-#define topology_book_cpumask(cpu)
The type of **_id is int.
The type of siblings is (const) struct cpumask *.
@@ -59,9 +45,6 @@ not defined by include/asm-XXX/topology.h:
3) thread_siblings: just the given CPU
4) core_siblings: just the given CPU
-For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
-default definitions for topology_book_id() and topology_book_cpumask().
-
Additionally, CPU topology information is provided under
/sys/devices/system/cpu and includes these files. The internal
source for the output is in brackets ("[]").
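
A hedged user-space sketch of reading the attributes documented above for
cpu0 (the paths are exactly those listed; error handling is trimmed):

    #include <stdio.h>

    static void show(const char *attr)
    {
            char path[128], buf[64];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu0/topology/%s", attr);
            f = fopen(path, "r");
            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", attr, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            show("physical_package_id");
            show("core_id");
            show("thread_siblings");
            show("core_siblings");
            return 0;
    }
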
diff --git a/trunk/Documentation/hwmon/sysfs-interface b/trunk/Documentation/hwmon/sysfs-interface
index 48ceabedf55d..ff45d1f837c8 100644
--- a/trunk/Documentation/hwmon/sysfs-interface
+++ b/trunk/Documentation/hwmon/sysfs-interface
@@ -91,11 +91,12 @@ name The chip name.
I2C devices get this attribute created automatically.
RO
-update_interval The interval at which the chip will update readings.
+update_rate The rate at which the chip will update readings.
Unit: millisecond
RW
- Some devices have a variable update rate or interval.
- This attribute can be used to change it to the desired value.
+ Some devices have a variable update rate. This attribute
+ can be used to change the update rate to the desired
+ frequency.
************
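
A hedged user-space sketch of driving the RW update_rate attribute described
above; the hwmon0 path is illustrative, as the exact location of a chip's
attributes depends on the device:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/device/update_rate", "w");

            if (!f) {
                    perror("update_rate");
                    return 1;
            }
            fprintf(f, "500\n");    /* milliseconds, per the unit above */
            fclose(f);
            return 0;
    }
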
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index a2ffd6be0ef7..8dd7248508a9 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -2153,11 +2153,6 @@ and is between 256 and 4096 characters. It is defined in the file
Reserves a hole at the top of the kernel virtual
address space.
- reservelow= [X86]
- Format: nn[K]
- Set the amount of memory to reserve for BIOS at
- the bottom of the address space.
-
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
@@ -2440,10 +2435,6 @@ and is between 256 and 4096 characters. It is defined in the file
disables clocksource verification at runtime.
Used to enable high-resolution timer mode on older
hardware, and in virtualized environment.
- [x86] noirqtime: Do not use TSC to do irq accounting.
- Used to run time disable IRQ_TIME_ACCOUNTING on any
- platforms where RDTSC is slow and this accounting
- can add overhead.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
diff --git a/trunk/Documentation/kprobes.txt b/trunk/Documentation/kprobes.txt
index 741fe66d6eca..1762b81fcdf2 100644
--- a/trunk/Documentation/kprobes.txt
+++ b/trunk/Documentation/kprobes.txt
@@ -542,11 +542,9 @@ Kprobes does not use mutexes or allocate memory except during
registration and unregistration.
Probe handlers are run with preemption disabled. Depending on the
-architecture and optimization state, handlers may also run with
-interrupts disabled (e.g., kretprobe handlers and optimized kprobe
-handlers run without interrupt disabled on x86/x86-64). In any case,
-your handler should not yield the CPU (e.g., by attempting to acquire
-a semaphore).
+architecture, handlers may also run with interrupts disabled. In any
+case, your handler should not yield the CPU (e.g., by attempting to
+acquire a semaphore).
Since a return probe is implemented by replacing the return
address with the trampoline's address, stack backtraces and calls
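
A hedged sketch of the constraint described above: a probe handler runs with
preemption (and possibly interrupts) disabled, so it may take spinlocks but
must never sleep. handler_pre, stats_lock and hits are illustrative names:

    #include <linux/kprobes.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);
    static unsigned long hits;

    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
    {
            spin_lock(&stats_lock);         /* fine: never sleeps */
            hits++;
            spin_unlock(&stats_lock);
            /* mutex_lock() or down() here could sleep and is forbidden */
            return 0;
    }
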
diff --git a/trunk/Documentation/networking/e1000.txt b/trunk/Documentation/networking/e1000.txt
index d9271e74e488..2df71861e578 100644
--- a/trunk/Documentation/networking/e1000.txt
+++ b/trunk/Documentation/networking/e1000.txt
@@ -1,35 +1,82 @@
Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
===============================================================
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+September 26, 2006
+
Contents
========
+- In This Release
- Identifying Your Adapter
+- Building and Installation
- Command Line Parameters
- Speed and Duplex Configuration
- Additional Configurations
+- Known Issues
- Support
+
+In This Release
+===============
+
+This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family
+of Adapters. This driver includes support for Itanium(R)2-based systems.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel PRO/1000 adapter. All hardware requirements listed
+apply to use with Linux.
+
+The following features are now available in supported kernels:
+ - Native VLANs
+ - Channel Bonding (teaming)
+ - SNMP
+
+Channel Bonding documentation can be found in the Linux kernel source:
+/Documentation/networking/bonding.txt
+
+The driver information previously displayed in the /proc filesystem is not
+supported in this release. Alternatively, you can use ethtool (version 1.6
+or later), lspci, and ifconfig to obtain the same information.
+
+Instructions on updating ethtool can be found in the section "Additional
+Configurations" later in this document.
+
+NOTE: The Intel(R) 82562v 10/100 Network Connection only provides 10/100
+support.
+
+
Identifying Your Adapter
========================
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
- http://support.intel.com/support/go/network/adapter/idguide.htm
+ http://support.intel.com/support/network/adapter/pro100/21397.htm
For the latest Intel network drivers for Linux, refer to the following
website. In the search field, enter your adapter name or type, or use the
networking link on the left to search for your adapter:
- http://support.intel.com/support/go/network/adapter/home.htm
+ http://downloadfinder.intel.com/scripts-df/support_intel.asp
+
Command Line Parameters
=======================
+If the driver is built as a module, the following optional parameters
+are used by entering them on the command line with the modprobe command
+using this syntax:
+
+ modprobe e1000 [<option>=<VAL1>,<VAL2>,...]
+
+For example, with two PRO/1000 PCI adapters, entering:
+
+ modprobe e1000 TxDescriptors=80,128
+
+loads the e1000 driver with 80 TX descriptors for the first adapter and
+128 TX descriptors for the second adapter.
+
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
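
As a hedged sketch of how such comma-separated, per-adapter parameters are
typically declared, module_param_array() is the standard kernel mechanism;
the constants and names below are illustrative rather than the driver's
actual internals:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    #define E1000_MAX_NIC 32

    static int TxDescriptors[E1000_MAX_NIC] = { [0 ... E1000_MAX_NIC - 1] = -1 };
    static unsigned int num_TxDescriptors;

    /* "modprobe e1000 TxDescriptors=80,128" fills slots 0 and 1 and sets
     * num_TxDescriptors to 2; untouched adapters keep the -1 default. */
    module_param_array(TxDescriptors, int, &num_TxDescriptors, 0);
    MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
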
@@ -42,6 +89,10 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
parameters, see the application note at:
http://www.intel.com/design/network/applnots/ap450.htm
+ A descriptor describes a data buffer and attributes related to
+ the data buffer. This information is accessed by the hardware.
+
+
AutoNeg
-------
(Supported only on adapters with copper connections)
@@ -55,6 +106,7 @@ Duplex parameters must not be specified.
NOTE: Refer to the Speed and Duplex section of this readme for more
information on the AutoNeg parameter.
+
Duplex
------
(Supported only on adapters with copper connections)
@@ -67,6 +119,7 @@ set to auto-negotiate, the board auto-detects the correct duplex. If the
link partner is forced (either full or half), Duplex defaults to half-
duplex.
+
FlowControl
-----------
Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
@@ -75,16 +128,16 @@ Default Value: Reads flow control settings from the EEPROM
This parameter controls the automatic generation(Tx) and response(Rx)
to Ethernet PAUSE frames.
+
InterruptThrottleRate
---------------------
(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
- 4=simplified balancing)
+Valid Range: 0,1,3,100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
Default Value: 3
The driver can limit the amount of interrupts per second that the adapter
-will generate for incoming packets. It does this by writing a value to the
-adapter that is based on the maximum amount of interrupts that the adapter
+will generate for incoming packets. It does this by writing a value to the
+adapter that is based on the maximum amount of interrupts that the adapter
will generate per second.
Setting InterruptThrottleRate to a value greater or equal to 100
@@ -93,43 +146,37 @@ per second, even if more packets have come in. This reduces interrupt
load on the system and can lower CPU utilization under heavy load,
but will increase latency as packets are not processed as quickly.
-The default behaviour of the driver previously assumed a static
-InterruptThrottleRate value of 8000, providing a good fallback value for
-all traffic types,but lacking in small packet performance and latency.
-The hardware can handle many more small packets per second however, and
+The default behaviour of the driver previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types, but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second however, and
for this reason an adaptive interrupt moderation algorithm was implemented.
Since 7.3.x, the driver has two adaptive modes (setting 1 or 3) in which
-it dynamically adjusts the InterruptThrottleRate value based on the traffic
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
that it receives. After determining the type of incoming traffic in the last
-timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
for that traffic.
The algorithm classifies the incoming traffic every interval into
-classes. Once the class is determined, the InterruptThrottleRate value is
-adjusted to suit that traffic type the best. There are three classes defined:
+classes. Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
"Bulk traffic", for large amounts of packets of normal size; "Low latency",
for small amounts of traffic and/or a significant percentage of small
-packets; and "Lowest latency", for almost completely small packets or
+packets; and "Lowest latency", for almost completely small packets or
minimal traffic.
-In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
-for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
-latency" or "Lowest latency" class, the InterruptThrottleRate is increased
+In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
+for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
+latency" or "Lowest latency" class, the InterruptThrottleRate is increased
stepwise to 20000. This default mode is suitable for most applications.
For situations where low latency is vital such as cluster or
grid computing, the algorithm can reduce latency even more when
InterruptThrottleRate is set to mode 1. In this mode, which operates
-the same as mode 3, the InterruptThrottleRate will be increased stepwise to
+the same as mode 3, the InterruptThrottleRate will be increased stepwise to
70000 for traffic in class "Lowest latency".
-In simplified mode the interrupt rate is based on the ratio of Tx and
-Rx traffic. If the bytes per second rate is approximately equal, the
-interrupt rate will drop as low as 2000 interrupts per second. If the
-traffic is mostly transmit or mostly receive, the interrupt rate could
-be as high as 8000.
-
Setting InterruptThrottleRate to 0 turns off any interrupt moderation
and may improve small packet latency, but is generally not suitable
for bulk throughput traffic.
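
The adaptive behaviour above can be pictured with a hedged sketch: classify
the last interval's traffic, then pick a target InterruptThrottleRate. The
thresholds and names are illustrative, not the driver's actual constants:

    enum itr_class { BULK_TRAFFIC, LOW_LATENCY, LOWEST_LATENCY };

    static unsigned int pick_itr(unsigned int packets, unsigned int bytes,
                                 unsigned int cur_itr, int conservative)
    {
            unsigned int avg = packets ? bytes / packets : 0;
            unsigned int cap = conservative ? 20000 : 70000; /* mode 3 vs 1 */
            enum itr_class class;

            if (avg < 128 || packets < 10)
                    class = LOWEST_LATENCY; /* tiny packets or minimal traffic */
            else if (packets > 1000 && avg > 512)
                    class = BULK_TRAFFIC;   /* many normal-size packets */
            else
                    class = LOW_LATENCY;

            if (class == BULK_TRAFFIC)
                    return 4000;            /* fixed rate for bulk traffic */
            /* step upward toward the cap for the latency-sensitive classes */
            return (cur_itr * 2 > cap) ? cap : cur_itr * 2;
    }
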
@@ -165,6 +212,8 @@ NOTE: When e1000 is loaded with default settings and multiple adapters
be platform-specific. If CPU utilization is not a concern, use
RX_POLLING (NAPI) and default driver settings.
+
+
RxDescriptors
-------------
Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -176,14 +225,15 @@ by the driver. Increasing this value allows the driver to buffer more
incoming packets, at the expense of increased system memory utilization.
Each descriptor is 16 bytes. A receive buffer is also allocated for each
-descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
+descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
on the MTU setting. The maximum MTU size is 16110.
-NOTE: MTU designates the frame size. It only needs to be set for Jumbo
- Frames. Depending on the available system resources, the request
- for a higher number of receive descriptors may be denied. In this
+NOTE: MTU designates the frame size. It only needs to be set for Jumbo
+ Frames. Depending on the available system resources, the request
+ for a higher number of receive descriptors may be denied. In this
case, use a lower number.
+
RxIntDelay
----------
Valid Range: 0-65535 (0=off)
@@ -204,6 +254,7 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may
restoring the network connection. To eliminate the potential
for the hang ensure that RxIntDelay is set to 0.
+
RxAbsIntDelay
-------------
(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -217,6 +268,7 @@ packet is received within the set amount of time. Proper tuning,
along with RxIntDelay, may improve traffic throughput in specific network
conditions.
+
Speed
-----
(This parameter is supported only on adapters with copper connections.)
@@ -228,6 +280,7 @@ Speed forces the line speed to the specified value in megabits per second
partner is set to auto-negotiate, the board will auto-detect the correct
speed. Duplex should also be set when Speed is set to either 10 or 100.
+
TxDescriptors
-------------
Valid Range: 80-256 for 82542 and 82543-based adapters
@@ -242,36 +295,6 @@ NOTE: Depending on the available system resources, the request for a
higher number of transmit descriptors may be denied. In this case,
use a lower number.
-TxDescriptorStep
-----------------
-Valid Range: 1 (use every Tx Descriptor)
- 4 (use every 4th Tx Descriptor)
-
-Default Value: 1 (use every Tx Descriptor)
-
-On certain non-Intel architectures, it has been observed that intense TX
-traffic bursts of short packets may result in an improper descriptor
-writeback. If this occurs, the driver will report a "TX Timeout" and reset
-the adapter, after which the transmit flow will restart, though data may
-have stalled for as much as 10 seconds before it resumes.
-
-The improper writeback does not occur on the first descriptor in a system
-memory cache-line, which is typically 32 bytes, or 4 descriptors long.
-
-Setting TxDescriptorStep to a value of 4 will ensure that all TX descriptors
-are aligned to the start of a system memory cache line, and so this problem
-will not occur.
-
-NOTES: Setting TxDescriptorStep to 4 effectively reduces the number of
- TxDescriptors available for transmits to 1/4 of the normal allocation.
- This has a possible negative performance impact, which may be
- compensated for by allocating more descriptors using the TxDescriptors
- module parameter.
-
- There are other conditions which may result in "TX Timeout", which will
- not be resolved by the use of the TxDescriptorStep parameter. As the
- issue addressed by this parameter has never been observed on Intel
- Architecture platforms, it should not be used on Intel platforms.
TxIntDelay
----------
@@ -284,6 +307,7 @@ efficiency if properly tuned for specific network traffic. If the
system is reporting dropped transmits, this value may be set too high
causing the driver to run out of available transmit descriptors.
+
TxAbsIntDelay
-------------
(This parameter is supported only on 82540, 82545 and later adapters.)
@@ -306,35 +330,6 @@ Default Value: 1
A value of '1' indicates that the driver should enable IP checksum
offload for received packets (both UDP and TCP) to the adapter hardware.
-Copybreak
----------
-Valid Range: 0-xxxxxxx (0=off)
-Default Value: 256
-Usage: insmod e1000.ko copybreak=128
-
-Driver copies all packets below or equaling this size to a fresh Rx
-buffer before handing it up the stack.
-
-This parameter is different than other parameters, in that it is a
-single (not 1,1,1 etc.) parameter applied to all driver instances and
-it is also available during runtime at
-/sys/module/e1000/parameters/copybreak
-
-SmartPowerDownEnable
---------------------
-Valid Range: 0-1
-Default Value: 0 (disabled)
-
-Allows PHY to turn off in lower power states. The user can turn off
-this parameter in supported chipsets.
-
-KumeranLockLoss
----------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-This workaround skips resetting the PHY at shutdown for the initial
-silicon releases of ICH8 systems.
Speed and Duplex Configuration
==============================
@@ -390,9 +385,40 @@ If the link partner is forced to a specific speed and duplex, then this
parameter should not be used. Instead, use the Speed and Duplex parameters
previously mentioned to force the adapter to the same speed and duplex.
+
Additional Configurations
=========================
+ Configuring the Driver on Different Distributions
+ -------------------------------------------------
+ Configuring a network driver to load properly when the system is started
+ is distribution dependent. Typically, the configuration process involves
+ adding an alias line to /etc/modules.conf or /etc/modprobe.conf as well
+ as editing other system startup scripts and/or configuration files. Many
+ popular Linux distributions ship with tools to make these changes for you.
+ To learn the proper way to configure a network device for your system,
+ refer to your distribution documentation. If during this process you are
+ asked for the driver or module name, the name for the Linux Base Driver
+ for the Intel(R) PRO/1000 Family of Adapters is e1000.
+
+ As an example, if you install the e1000 driver for two PRO/1000 adapters
+ (eth0 and eth1) and set the speed and duplex to 10full and 100half, add
+ the following to modules.conf or modprobe.conf:
+
+ alias eth0 e1000
+ alias eth1 e1000
+ options e1000 Speed=10,100 Duplex=2,1
+
+ Viewing Link Messages
+ ---------------------
+ Link messages will not be displayed to the console if the distribution is
+ restricting system messages. In order to see network driver link messages
+ on your console, set dmesg to eight by entering the following:
+
+ dmesg -n 8
+
+ NOTE: This setting is not saved across reboots.
+
Jumbo Frames
------------
Jumbo Frames support is enabled by changing the MTU to a value larger than
@@ -411,11 +437,9 @@ Additional Configurations
setting in a different location.
Notes:
- Degradation in throughput performance may be observed in some Jumbo frames
- environments. If this is observed, increasing the application's socket buffer
- size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
- See the specific application manual and /usr/src/linux*/Documentation/
- networking/ip-sysctl.txt for more details.
+
+ - To enable Jumbo Frames, increase the MTU size on the interface beyond
+ 1500.
- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
with the maximum Jumbo Frames size of 16128.
@@ -423,11 +447,40 @@ Additional Configurations
- Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
loss of link.
+ - Some Intel gigabit adapters that support Jumbo Frames have a frame size
+ limit of 9238 bytes, with a corresponding MTU size limit of 9216 bytes.
+ The adapters with this limitation are based on the Intel(R) 82571EB,
+ 82572EI, 82573L and 80003ES2LAN controller. These correspond to the
+ following product names:
+ Intel(R) PRO/1000 PT Server Adapter
+ Intel(R) PRO/1000 PT Desktop Adapter
+ Intel(R) PRO/1000 PT Network Connection
+ Intel(R) PRO/1000 PT Dual Port Server Adapter
+ Intel(R) PRO/1000 PT Dual Port Network Connection
+ Intel(R) PRO/1000 PF Server Adapter
+ Intel(R) PRO/1000 PF Network Connection
+ Intel(R) PRO/1000 PF Dual Port Server Adapter
+ Intel(R) PRO/1000 PB Server Connection
+ Intel(R) PRO/1000 PL Network Connection
+ Intel(R) PRO/1000 EB Network Connection with I/O Acceleration
+ Intel(R) PRO/1000 EB Backplane Connection with I/O Acceleration
+ Intel(R) PRO/1000 PT Quad Port Server Adapter
+
- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
support Jumbo Frames. These correspond to the following product names:
Intel(R) PRO/1000 Gigabit Server Adapter
Intel(R) PRO/1000 PM Network Connection
+ - The following adapters do not support Jumbo Frames:
+ Intel(R) 82562V 10/100 Network Connection
+ Intel(R) 82566DM Gigabit Network Connection
+ Intel(R) 82566DC Gigabit Network Connection
+ Intel(R) 82566MM Gigabit Network Connection
+ Intel(R) 82566MC Gigabit Network Connection
+ Intel(R) 82562GT 10/100 Network Connection
+ Intel(R) 82562G 10/100 Network Connection
+
+
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
@@ -437,14 +490,142 @@ Additional Configurations
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel.
+ NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
+ for a more complete ethtool feature set can be enabled by upgrading
+ ethtool to ethtool-1.8.1.
+
Enabling Wake on LAN* (WoL)
---------------------------
- WoL is configured through the Ethtool* utility.
+ WoL is configured through the Ethtool* utility. Ethtool is included with
+ all versions of Red Hat after Red Hat 7.2. For other Linux distributions,
+ download and install Ethtool from the following website:
+ http://sourceforge.net/projects/gkernel.
+
+ For instructions on enabling WoL with Ethtool, refer to the website listed
+ above.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the e1000 driver must be
loaded when shutting down or rebooting the system.
+ Wake On LAN is only supported on port A for the following devices:
+ Intel(R) PRO/1000 PT Dual Port Network Connection
+ Intel(R) PRO/1000 PT Dual Port Server Connection
+ Intel(R) PRO/1000 PT Dual Port Server Adapter
+ Intel(R) PRO/1000 PF Dual Port Server Adapter
+ Intel(R) PRO/1000 PT Quad Port Server Adapter
+
+ NAPI
+ ----
+ NAPI (Rx polling mode) is enabled in the e1000 driver.
+
+ See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
+
+
+Known Issues
+============
+
+Dropped Receive Packets on Half-duplex 10/100 Networks
+------------------------------------------------------
+If you have an Intel PCI Express adapter running at 10mbps or 100mbps, half-
+duplex, you may observe occasional dropped receive packets. There are no
+workarounds for this problem in this network configuration. The network must
+be updated to operate in full-duplex, and/or 1000mbps only.
+
+Jumbo Frames System Requirement
+-------------------------------
+Memory allocation failures have been observed on Linux systems with 64 MB
+of RAM or less that are running Jumbo Frames. If you are using Jumbo
+Frames, your system may require more than the advertised minimum
+requirement of 64 MB of system memory.
+
+Performance Degradation with Jumbo Frames
+-----------------------------------------
+Degradation in throughput performance may be observed in some Jumbo frames
+environments. If this is observed, increasing the application's socket
+buffer size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values
+may help. See the specific application manual and
+/usr/src/linux*/Documentation/
+networking/ip-sysctl.txt for more details.
+
+Jumbo Frames on Foundry BigIron 8000 switch
+-------------------------------------------
+There is a known issue using Jumbo frames when connected to a Foundry
+BigIron 8000 switch. This is a 3rd party limitation. If you experience
+loss of packets, lower the MTU size.
+
+Allocating Rx Buffers when Using Jumbo Frames
+---------------------------------------------
+Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
+the available memory is heavily fragmented. This issue may be seen with PCI-X
+adapters or with packet split disabled. This can be reduced or eliminated
+by changing the amount of available memory for receive buffer allocation, by
+increasing /proc/sys/vm/min_free_kbytes.
+
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+Due to the default ARP behavior on Linux, it is not possible to have
+one system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
+
+If you have multiple interfaces in a server, either turn on ARP
+filtering by entering:
+
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+(this only works if your kernel's version is higher than 2.4.5),
+
+NOTE: This setting is not saved across reboots. The configuration
+change can be made permanent by adding the line:
+ net.ipv4.conf.all.arp_filter = 1
+to the file /etc/sysctl.conf
+
+ or,
+
+install the interfaces in separate broadcast domains (either in
+different switches or in a switch partitioned to VLANs).
+
+82541/82547 can't link or are slow to link with some link partners
+-----------------------------------------------------------------
+There is a known compatibility issue with 82541/82547 and some
+low-end switches where the link will not be established, or will
+be slow to establish. In particular, these switches are known to
+be incompatible with 82541/82547:
+
+ Planex FXG-08TE
+ I-O Data ETG-SH8
+
+To work around this issue, the driver can be compiled with an override
+of the PHY's master/slave setting. Forcing master or forcing slave
+mode will improve time-to-link.
+
+ # make CFLAGS_EXTRA=-DE1000_MASTER_SLAVE=<n>
+
+Where <n> is:
+
+ 0 = Hardware default
+ 1 = Master mode
+ 2 = Slave mode
+ 3 = Auto master/slave
+
+Disable rx flow control with ethtool
+------------------------------------
+In order to disable receive flow control using ethtool, you must turn
+off auto-negotiation on the same command line.
+
+For example:
+
+ ethtool -A eth? autoneg off rx off
+
+Unplugging network cable while ethtool -p is running
+----------------------------------------------------
+In kernel versions 2.5.50 and later (including 2.6 kernel), unplugging
+the network cable while ethtool -p is running will cause the system to
+become unresponsive to keyboard commands, except for control-alt-delete.
+Restarting the system appears to be the only remedy.
+
+
Support
=======
diff --git a/trunk/Documentation/networking/e1000e.txt b/trunk/Documentation/networking/e1000e.txt
deleted file mode 100644
index 6aa048badf32..000000000000
--- a/trunk/Documentation/networking/e1000e.txt
+++ /dev/null
@@ -1,302 +0,0 @@
-Linux* Driver for Intel(R) Network Connection
-===============================================================
-
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Command Line Parameters
-- Additional Configurations
-- Support
-
-Identifying Your Adapter
-========================
-
-The e1000e driver supports all PCI Express Intel(R) Gigabit Network
-Connections, except those that are 82575, 82576 and 82580-based*.
-
-* NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by
- the e1000 driver, not the e1000e driver due to the 82546 part being used
- behind a PCI Express bridge.
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-For the latest Intel network drivers for Linux, refer to the following
-website. In the search field, enter your adapter name or type, or use the
-networking link on the left to search for your adapter:
-
- http://support.intel.com/support/go/network/adapter/home.htm
-
-Command Line Parameters
-=======================
-
-The default value for each parameter is generally the recommended setting,
-unless otherwise noted.
-
-NOTES: For more information about the InterruptThrottleRate,
- RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay
- parameters, see the application note at:
- http://www.intel.com/design/network/applnots/ap450.htm
-
-InterruptThrottleRate
----------------------
-Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
- 4=simplified balancing)
-Default Value: 3
-
-The driver can limit the amount of interrupts per second that the adapter
-will generate for incoming packets. It does this by writing a value to the
-adapter that is based on the maximum amount of interrupts that the adapter
-will generate per second.
-
-Setting InterruptThrottleRate to a value greater or equal to 100
-will program the adapter to send out a maximum of that many interrupts
-per second, even if more packets have come in. This reduces interrupt
-load on the system and can lower CPU utilization under heavy load,
-but will increase latency as packets are not processed as quickly.
-
-The driver has two adaptive modes (setting 1 or 3) in which
-it dynamically adjusts the InterruptThrottleRate value based on the traffic
-that it receives. After determining the type of incoming traffic in the last
-timeframe, it will adjust the InterruptThrottleRate to an appropriate value
-for that traffic.
-
-The algorithm classifies the incoming traffic every interval into
-classes. Once the class is determined, the InterruptThrottleRate value is
-adjusted to suit that traffic type the best. There are three classes defined:
-"Bulk traffic", for large amounts of packets of normal size; "Low latency",
-for small amounts of traffic and/or a significant percentage of small
-packets; and "Lowest latency", for almost completely small packets or
-minimal traffic.
-
-In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
-for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
-latency" or "Lowest latency" class, the InterruptThrottleRate is increased
-stepwise to 20000. This default mode is suitable for most applications.
-
-For situations where low latency is vital such as cluster or
-grid computing, the algorithm can reduce latency even more when
-InterruptThrottleRate is set to mode 1. In this mode, which operates
-the same as mode 3, the InterruptThrottleRate will be increased stepwise to
-70000 for traffic in class "Lowest latency".
-
-In simplified mode the interrupt rate is based on the ratio of Tx and
-Rx traffic. If the bytes per second rate is approximately equal the
-interrupt rate will drop as low as 2000 interrupts per second. If the
-traffic is mostly transmit or mostly receive, the interrupt rate could
-be as high as 8000.
-
-Setting InterruptThrottleRate to 0 turns off any interrupt moderation
-and may improve small packet latency, but is generally not suitable
-for bulk throughput traffic.
-
-NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
- RxAbsIntDelay parameters. In other words, minimizing the receive
- and/or transmit absolute delays does not force the controller to
- generate more interrupts than what the Interrupt Throttle Rate
- allows.
-
-NOTE: When e1000e is loaded with default settings and multiple adapters
- are in use simultaneously, the CPU utilization may increase non-
- linearly. In order to limit the CPU utilization without impacting
- the overall throughput, we recommend that you load the driver as
- follows:
-
- modprobe e1000e InterruptThrottleRate=3000,3000,3000
-
- This sets the InterruptThrottleRate to 3000 interrupts/sec for
- the first, second, and third instances of the driver. The range
- of 2000 to 3000 interrupts per second works on a majority of
- systems and is a good starting point, but the optimal value will
- be platform-specific. If CPU utilization is not a concern, use
- RX_POLLING (NAPI) and default driver settings.
-
-RxIntDelay
-----------
-Valid Range: 0-65535 (0=off)
-Default Value: 0
-
-This value delays the generation of receive interrupts in units of 1.024
-microseconds. Receive interrupt reduction can improve CPU efficiency if
-properly tuned for specific network traffic. Increasing this value adds
-extra latency to frame reception and can end up decreasing the throughput
-of TCP traffic. If the system is reporting dropped receives, this value
-may be set too high, causing the driver to run out of available receive
-descriptors.
-
-CAUTION: When setting RxIntDelay to a value other than 0, adapters may
- hang (stop transmitting) under certain network conditions. If
- this occurs a NETDEV WATCHDOG message is logged in the system
- event log. In addition, the controller is automatically reset,
- restoring the network connection. To eliminate the potential
- for the hang ensure that RxIntDelay is set to 0.
-
-RxAbsIntDelay
--------------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
-
-This value, in units of 1.024 microseconds, limits the delay in which a
-receive interrupt is generated. Useful only if RxIntDelay is non-zero,
-this value ensures that an interrupt is generated after the initial
-packet is received within the set amount of time. Proper tuning,
-along with RxIntDelay, may improve traffic throughput in specific network
-conditions.
-
-TxIntDelay
-----------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
-
-This value delays the generation of transmit interrupts in units of
-1.024 microseconds. Transmit interrupt reduction can improve CPU
-efficiency if properly tuned for specific network traffic. If the
-system is reporting dropped transmits, this value may be set too high
-causing the driver to run out of available transmit descriptors.
-
-TxAbsIntDelay
--------------
-Valid Range: 0-65535 (0=off)
-Default Value: 32
-
-This value, in units of 1.024 microseconds, limits the delay in which a
-transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
-this value ensures that an interrupt is generated after the initial
-packet is sent on the wire within the set amount of time. Proper tuning,
-along with TxIntDelay, may improve traffic throughput in specific
-network conditions.
-
-Copybreak
----------
-Valid Range: 0-xxxxxxx (0=off)
-Default Value: 256
-
-Driver copies all packets below or equaling this size to a fresh Rx
-buffer before handing it up the stack.
-
-This parameter is different than other parameters, in that it is a
-single (not 1,1,1 etc.) parameter applied to all driver instances and
-it is also available during runtime at
-/sys/module/e1000e/parameters/copybreak
-
-SmartPowerDownEnable
---------------------
-Valid Range: 0-1
-Default Value: 0 (disabled)
-
-Allows PHY to turn off in lower power states. The user can set this parameter
-in supported chipsets.
-
-KumeranLockLoss
----------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-This workaround skips resetting the PHY at shutdown for the initial
-silicon releases of ICH8 systems.
-
-IntMode
--------
-Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X)
-Default Value: 2
-
-Allows changing the interrupt mode at module load time, without requiring a
-recompile. If the driver load fails to enable a specific interrupt mode, the
-driver will try other interrupt modes, from least to most compatible. The
-interrupt order is MSI-X, MSI, Legacy. If specifying MSI (IntMode=1)
-interrupts, only MSI and Legacy will be attempted.
-
-CrcStripping
-------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-Strip the CRC from received packets before sending up the network stack. If
-you have a machine with a BMC enabled but cannot receive IPMI traffic after
-loading or enabling the driver, try disabling this feature.
-
-WriteProtectNVM
----------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-Set the hardware to ignore all write/erase cycles to the GbE region in the
-ICHx NVM (non-volatile memory). This feature can be disabled by the
-WriteProtectNVM module parameter (enabled by default) only after a hardware
-reset, but the machine must be power cycled before trying to enable writes.
-
-Note: the kernel boot option iomem=relaxed may need to be set if the kernel
-config option CONFIG_STRICT_DEVMEM=y, if the root user wants to write the
-NVM from user space via ethtool.
-
-Additional Configurations
-=========================
-
- Jumbo Frames
- ------------
- Jumbo Frames support is enabled by changing the MTU to a value larger than
- the default of 1500. Use the ifconfig command to increase the MTU size.
- For example:
-
- ifconfig eth<x> mtu 9000 up
-
- This setting is not saved across reboots.
-
- Notes:
-
- - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
- with the maximum Jumbo Frames size of 9234 bytes.
-
- - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
- poor performance or loss of link.
-
- - Some adapters limit Jumbo Frames sized packets to a maximum of
- 4096 bytes and some adapters do not support Jumbo Frames.
-
-
- Ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. We
- strongly recommend downloading the latest version of Ethtool at:
-
- http://sourceforge.net/projects/gkernel.
-
- Speed and Duplex
- ----------------
- Speed and Duplex are configured through the Ethtool* utility. For
- instructions, refer to the Ethtool man page.
-
- Enabling Wake on LAN* (WoL)
- ---------------------------
- WoL is configured through the Ethtool* utility. For instructions on
- enabling WoL with Ethtool, refer to the Ethtool man page.
-
- WoL will be enabled on the system during the next shut down or reboot.
- For this driver version, in order to enable WoL, the e1000e driver must be
- loaded when shutting down or rebooting the system.
-
- In most cases Wake On LAN is only supported on port A for multiple port
- adapters. To verify if a port supports Wake on LAN run ethtool eth<X>.
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- www.intel.com/support/
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/trunk/Documentation/networking/ixgbevf.txt b/trunk/Documentation/networking/ixgbevf.txt
old mode 100644
new mode 100755
index 21dd5d15b6b4..19015de6725f
--- a/trunk/Documentation/networking/ixgbevf.txt
+++ b/trunk/Documentation/networking/ixgbevf.txt
@@ -1,16 +1,19 @@
Linux* Base Driver for Intel(R) Network Connection
==================================================
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+November 24, 2009
Contents
========
+- In This Release
- Identifying Your Adapter
- Known Issues/Troubleshooting
- Support
+In This Release
+===============
+
This file describes the ixgbevf Linux* Base Driver for Intel Network
Connection.
@@ -30,7 +33,7 @@ Identifying Your Adapter
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
- http://support.intel.com/support/go/network/adapter/idguide.htm
+ http://support.intel.com/support/network/sb/CS-008441.htm
Known Issues/Troubleshooting
============================
@@ -54,3 +57,34 @@ or the Intel Wired Networking project hosted by Sourceforge at:
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related
to the issue to e1000-devel@lists.sf.net
+
+License
+=======
+
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2009 Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+The full GNU General Public License is included in this distribution in
+the file called "COPYING".
+
+Trademarks
+==========
+
+Intel, Itanium, and Pentium are trademarks or registered trademarks of
+Intel Corporation or its subsidiaries in the United States and other
+countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/trunk/Documentation/power/regulator/overview.txt b/trunk/Documentation/power/regulator/overview.txt
index 8ed17587a74b..9363e056188a 100644
--- a/trunk/Documentation/power/regulator/overview.txt
+++ b/trunk/Documentation/power/regulator/overview.txt
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
current limit is controllable).
(C) 2008 Wolfson Microelectronics PLC.
-Author: Liam Girdwood
+Author: Liam Girdwood
Nomenclature
diff --git a/trunk/Documentation/vm/page-types.c b/trunk/Documentation/vm/page-types.c
index cc96ee2666f2..ccd951fa94ee 100644
--- a/trunk/Documentation/vm/page-types.c
+++ b/trunk/Documentation/vm/page-types.c
@@ -478,7 +478,7 @@ static void prepare_hwpoison_fd(void)
}
if (opt_unpoison && !hwpoison_forget_fd) {
- sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
+ sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
hwpoison_forget_fd = checked_open(buf, O_WRONLY);
}
}
diff --git a/trunk/Documentation/workqueue.txt b/trunk/Documentation/workqueue.txt
deleted file mode 100644
index e4498a2872c3..000000000000
--- a/trunk/Documentation/workqueue.txt
+++ /dev/null
@@ -1,380 +0,0 @@
-
-Concurrency Managed Workqueue (cmwq)
-
-September, 2010 Tejun Heo
- Florian Mickler
-
-CONTENTS
-
-1. Introduction
-2. Why cmwq?
-3. The Design
-4. Application Programming Interface (API)
-5. Example Execution Scenarios
-6. Guidelines
-
-
-1. Introduction
-
-There are many cases where an asynchronous process execution context
-is needed and the workqueue (wq) API is the most commonly used
-mechanism for such cases.
-
-When such an asynchronous execution context is needed, a work item
-describing which function to execute is put on a queue. An
-independent thread serves as the asynchronous execution context. The
-queue is called workqueue and the thread is called worker.
-
-While there are work items on the workqueue the worker executes the
-functions associated with the work items one after the other. When
-there is no work item left on the workqueue the worker becomes idle.
-When a new work item gets queued, the worker begins executing again.
-
-
-2. Why cmwq?
-
-In the original wq implementation, a multi threaded (MT) wq had one
-worker thread per CPU and a single threaded (ST) wq had one worker
-thread system-wide. A single MT wq needed to keep around the same
-number of workers as the number of CPUs. The kernel grew a lot of MT
-wq users over the years and with the number of CPU cores continuously
-rising, some systems saturated the default 32k PID space just booting
-up.
-
-Although MT wq wasted a lot of resource, the level of concurrency
-provided was unsatisfactory. The limitation was common to both ST and
-MT wq albeit less severe on MT. Each wq maintained its own separate
-worker pool. A MT wq could provide only one execution context per CPU
-while a ST wq one for the whole system. Work items had to compete for
-those very limited execution contexts leading to various problems
-including proneness to deadlocks around the single execution context.
-
-The tension between the provided level of concurrency and resource
-usage also forced its users to make unnecessary tradeoffs like libata
-choosing to use ST wq for polling PIOs and accepting an unnecessary
-limitation that no two polling PIOs can progress at the same time. As
-MT wq don't provide much better concurrency, users which require
-higher level of concurrency, like async or fscache, had to implement
-their own thread pool.
-
-Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
-focus on the following goals.
-
-* Maintain compatibility with the original workqueue API.
-
-* Use per-CPU unified worker pools shared by all wq to provide
- flexible level of concurrency on demand without wasting a lot of
- resource.
-
-* Automatically regulate worker pool and level of concurrency so that
- the API users don't need to worry about such details.
-
-
-3. The Design
-
-In order to ease the asynchronous execution of functions a new
-abstraction, the work item, is introduced.
-
-A work item is a simple struct that holds a pointer to the function
-that is to be executed asynchronously. Whenever a driver or subsystem
-wants a function to be executed asynchronously it has to set up a work
-item pointing to that function and queue that work item on a
-workqueue.
-
-Special purpose threads, called worker threads, execute the functions
-off of the queue, one after the other. If no work is queued, the
-worker threads become idle. These worker threads are managed in so
-called thread-pools.
-
-The cmwq design differentiates between the user-facing workqueues that
-subsystems and drivers queue work items on and the backend mechanism
-which manages thread-pool and processes the queued work items.
-
-The backend is called gcwq. There is one gcwq for each possible CPU
-and one gcwq to serve work items queued on unbound workqueues.
-
-Subsystems and drivers can create and queue work items through special
-workqueue API functions as they see fit. They can influence some
-aspects of the way the work items are executed by setting flags on the
-workqueue they are putting the work item on. These flags include
-things like CPU locality, reentrancy, concurrency limits and more. To
-get a detailed overview refer to the API description of
-alloc_workqueue() below.
-
-When a work item is queued to a workqueue, the target gcwq is
-determined according to the queue parameters and workqueue attributes
-and appended on the shared worklist of the gcwq. For example, unless
-specifically overridden, a work item of a bound workqueue will be
-queued on the worklist of exactly that gcwq that is associated to the
-CPU the issuer is running on.
-
-For any worker pool implementation, managing the concurrency level
-(how many execution contexts are active) is an important issue. cmwq
-tries to keep the concurrency at a minimal but sufficient level.
-Minimal to save resources and sufficient in that the system is used at
-its full capacity.
-
-Each gcwq bound to an actual CPU implements concurrency management by
-hooking into the scheduler. The gcwq is notified whenever an active
-worker wakes up or sleeps and keeps track of the number of the
-currently runnable workers. Generally, work items are not expected to
-hog a CPU and consume many cycles. That means maintaining just enough
-concurrency to prevent work processing from stalling should be
-optimal. As long as there are one or more runnable workers on the
-CPU, the gcwq doesn't start execution of a new work, but, when the
-last running worker goes to sleep, it immediately schedules a new
-worker so that the CPU doesn't sit idle while there are pending work
-items. This allows using a minimal number of workers without losing
-execution bandwidth.
-
-Keeping idle workers around costs nothing but the memory space for
-the kthreads, so cmwq holds onto idle ones for a while before killing
-them.
-
-For an unbound wq, the above concurrency management doesn't apply and
-the gcwq for the pseudo unbound CPU tries to start executing all work
-items as soon as possible.  The responsibility for regulating the
-concurrency level is on the users.  There is also a flag to mark a
-bound wq to ignore the concurrency management. Please refer to the
-API section for details.
-
-The forward progress guarantee relies on workers being created when
-more execution contexts are necessary, which in turn is guaranteed
-through the use of rescue workers.  All work items which might be used
-on code paths that handle memory reclaim are required to be queued on
-wq's that have a rescue-worker reserved for execution under memory
-pressure.  Otherwise, the thread-pool may deadlock waiting for
-execution contexts to free up.
-
-
-4. Application Programming Interface (API)
-
-alloc_workqueue() allocates a wq. The original create_*workqueue()
-functions are deprecated and scheduled for removal. alloc_workqueue()
-takes three arguments - @name, @flags and @max_active. @name is the
-name of the wq and is also used as the name of the rescuer thread if
-there is one.
-
-A wq no longer manages execution resources but serves as a domain for
-forward progress guarantee, flush and work item attributes. @flags
-and @max_active control how work items are assigned execution
-resources, scheduled and executed.
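-
-For example, a driver whose work items may be needed during memory
-reclaim could allocate its wq as follows (a sketch; my_wq is a
-placeholder name and the flags are described below):
-
-	struct workqueue_struct *my_wq;
-
-	my_wq = alloc_workqueue("my_wq", WQ_RESCUER, 0);
-	if (!my_wq)
-		return -ENOMEM;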
-
-@flags:
-
- WQ_NON_REENTRANT
-
- By default, a wq guarantees non-reentrance only on the same
- CPU. A work item may not be executed concurrently on the same
- CPU by multiple workers but is allowed to be executed
- concurrently on multiple CPUs. This flag makes sure
- non-reentrance is enforced across all CPUs. Work items queued
- to a non-reentrant wq are guaranteed to be executed by at most
- one worker system-wide at any given time.
-
- WQ_UNBOUND
-
- Work items queued to an unbound wq are served by a special
- gcwq which hosts workers which are not bound to any specific
- CPU. This makes the wq behave as a simple execution context
- provider without concurrency management. The unbound gcwq
- tries to start execution of work items as soon as possible.
- Unbound wq sacrifices locality but is useful for the following
- cases.
-
- * Wide fluctuation in the concurrency level requirement is
- expected and using a bound wq may end up creating a large
- number of mostly unused workers across different CPUs as the
- issuer hops through different CPUs.
-
- * Long-running, CPU-intensive workloads which can be better
- managed by the system scheduler.
-
- WQ_FREEZEABLE
-
- A freezeable wq participates in the freeze phase of the system
- suspend operations. Work items on the wq are drained and no
- new work item starts execution until thawed.
-
- WQ_RESCUER
-
- All wq which might be used in the memory reclaim paths _MUST_
- have this flag set. This reserves one worker exclusively for
- the execution of this wq under memory pressure.
-
- WQ_HIGHPRI
-
- Work items of a highpri wq are queued at the head of the
- worklist of the target gcwq and start execution regardless of
- the current concurrency level. In other words, highpri work
- items will always start execution as soon as execution
- resource is available.
-
- Ordering among highpri work items is preserved - a highpri
- work item queued after another highpri work item will start
- execution after the earlier highpri work item starts.
-
- Although highpri work items are not held back by other
- runnable work items, they still contribute to the concurrency
- level. Highpri work items in runnable state will prevent
- non-highpri work items from starting execution.
-
- This flag is meaningless for unbound wq.
-
- WQ_CPU_INTENSIVE
-
- Work items of a CPU intensive wq do not contribute to the
- concurrency level. In other words, runnable CPU intensive
- work items will not prevent other work items from starting
- execution. This is useful for bound work items which are
- expected to hog CPU cycles so that their execution is
- regulated by the system scheduler.
-
- Although CPU intensive work items don't contribute to the
- concurrency level, the start of their execution is still
- regulated by the concurrency management and runnable
- non-CPU-intensive work items can delay execution of CPU
- intensive work items.
-
- This flag is meaningless for unbound wq.
-
- WQ_HIGHPRI | WQ_CPU_INTENSIVE
-
- This combination makes the wq avoid interaction with
- concurrency management completely and behave as a simple
- per-CPU execution context provider. Work items queued on a
- highpri CPU-intensive wq start execution as soon as resources
- are available and don't affect execution of other work items.
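-
- As a sketch, such a wq could be allocated as follows (the name
- is a placeholder):
-
-	wq = alloc_workqueue("ctx_wq", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0);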
-
-@max_active:
-
-@max_active determines the maximum number of execution contexts per
-CPU which can be assigned to the work items of a wq. For example,
-with @max_active of 16, at most 16 work items of the wq can be
-executing at the same time per CPU.
-
-Currently, for a bound wq, the maximum limit for @max_active is 512
-and the default value used when 0 is specified is 256. For an unbound
-wq, the limit is the higher of 512 and 4 * num_possible_cpus().  These
-values are chosen sufficiently high such that they are not the
-limiting factor while providing protection in runaway cases.
-
-The number of active work items of a wq is usually regulated by the
-users of the wq, more specifically, by how many work items the users
-may queue at the same time. Unless there is a specific need for
-throttling the number of active work items, specifying '0' is
-recommended.
-
-Some users depend on the strict execution ordering of ST wq. The
-combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
-behavior. Work items on such wq are always queued to the unbound gcwq
-and only one work item can be active at any given time, thus achieving
-the same ordering property as ST wq.
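-
-A minimal sketch of such an ordered wq (ordered_wq is a placeholder
-name):
-
-	ordered_wq = alloc_workqueue("ordered_wq", WQ_UNBOUND, 1);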
-
-
-5. Example Execution Scenarios
-
-The following example execution scenarios try to illustrate how cmwq
-behaves under different configurations.
-
- Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
- w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
- again before finishing. w1 and w2 burn CPU for 5ms then sleep for
- 10ms.
-
-Ignoring all other tasks, work items and processing overhead, and assuming
-simple FIFO scheduling, the following is one highly simplified version
-of possible sequences of events with the original wq.
-
- TIME IN MSECS EVENT
- 0 w0 starts and burns CPU
- 5 w0 sleeps
- 15 w0 wakes up and burns CPU
- 20 w0 finishes
- 20 w1 starts and burns CPU
- 25 w1 sleeps
- 35 w1 wakes up and finishes
- 35 w2 starts and burns CPU
- 40 w2 sleeps
- 50 w2 wakes up and finishes
-
-And with cmwq with @max_active >= 3,
-
- TIME IN MSECS EVENT
- 0 w0 starts and burns CPU
- 5 w0 sleeps
- 5 w1 starts and burns CPU
- 10 w1 sleeps
- 10 w2 starts and burns CPU
- 15 w2 sleeps
- 15 w0 wakes up and burns CPU
- 20 w0 finishes
- 20 w1 wakes up and finishes
- 25 w2 wakes up and finishes
-
-If @max_active == 2,
-
- TIME IN MSECS EVENT
- 0 w0 starts and burns CPU
- 5 w0 sleeps
- 5 w1 starts and burns CPU
- 10 w1 sleeps
- 15 w0 wakes up and burns CPU
- 20 w0 finishes
- 20 w1 wakes up and finishes
- 20 w2 starts and burns CPU
- 25 w2 sleeps
- 35 w2 wakes up and finishes
-
-Now, let's assume w1 and w2 are queued to a different wq q1 which has
-WQ_HIGHPRI set,
-
- TIME IN MSECS EVENT
- 0 w1 and w2 start and burn CPU
- 5 w1 sleeps
- 10 w2 sleeps
- 10 w0 starts and burns CPU
- 15 w0 sleeps
- 15 w1 wakes up and finishes
- 20 w2 wakes up and finishes
- 25 w0 wakes up and burns CPU
- 30 w0 finishes
-
-If q1 has WQ_CPU_INTENSIVE set,
-
- TIME IN MSECS EVENT
- 0 w0 starts and burns CPU
- 5 w0 sleeps
- 5 w1 and w2 start and burn CPU
- 10 w1 sleeps
- 15 w2 sleeps
- 15 w0 wakes up and burns CPU
- 20 w0 finishes
- 20 w1 wakes up and finishes
- 25 w2 wakes up and finishes
-
-
-6. Guidelines
-
-* Do not forget to use WQ_RESCUER if a wq may process work items which
- are used during memory reclaim. Each wq with WQ_RESCUER set has one
- rescuer thread reserved for it. If there are dependencies among
- multiple work items used during memory reclaim, they should be
- queued to separate wq's, each with WQ_RESCUER.
-
-* Unless strict ordering is required, there is no need to use ST wq.
-
-* Unless there is a specific need, using 0 for @max_active is
- recommended. In most use cases, concurrency level usually stays
- well under the default limit.
-
-* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
- flush and work item attributes. Work items which are not involved
- in memory reclaim and don't need to be flushed as a part of a group
- of work items, and don't require any special attribute, can use one
- of the system wq's (see the sketch after this list). There is no
- difference in execution characteristics between using a dedicated
- wq and a system wq.
-
-* Unless work items are expected to consume a huge amount of CPU
- cycles, using a bound wq is usually beneficial due to the increased
- level of locality in wq operations and work item execution.
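-
-As a sketch of the system wq usage mentioned above (my_work and
-my_work_fn are placeholder names):
-
-	INIT_WORK(&my_work, my_work_fn);
-	schedule_work(&my_work);	/* queued on the default system wq */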
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 3d4179fbc526..e7c528ff1013 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -962,23 +962,6 @@ W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c6410/
-ARM/S5P ARM ARCHITECTURES
-M: Kukjin Kim
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-s5p*/
-
-ARM/SAMSUNG S5P SERIES FIMC SUPPORT
-M: Kyungmin Park
-M: Sylwester Nawrocki
-L: linux-arm-kernel@lists.infradead.org
-L: linux-media@vger.kernel.org
-S: Maintained
-F: arch/arm/plat-s5p/dev-fimc*
-F: arch/arm/plat-samsung/include/plat/*fimc*
-F: drivers/media/video/s5p-fimc/
-
ARM/SHMOBILE ARM ARCHITECTURE
M: Paul Mundt
M: Magnus Damm
@@ -1152,7 +1135,7 @@ ATLX ETHERNET DRIVERS
M: Jay Cliburn
M: Chris Snook
M: Jie Yang
-L: netdev@vger.kernel.org
+L: atl1-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
@@ -1237,7 +1220,7 @@ F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
AVR32 ARCHITECTURE
-M: Hans-Christian Egtvedt
+M: Haavard Skinnemoen
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
@@ -1245,7 +1228,7 @@ S: Supported
F: arch/avr32/
AVR32/AT32AP MACHINE SUPPORT
-M: Hans-Christian Egtvedt
+M: Haavard Skinnemoen
S: Supported
F: arch/avr32/mach-at32ap/
@@ -1527,8 +1510,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
S: Supported
F: Documentation/filesystems/ceph.txt
F: fs/ceph
-F: net/ceph
-F: include/linux/ceph
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
M: David Vrabel
@@ -2218,12 +2199,6 @@ W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
-EFIFB FRAMEBUFFER DRIVER
-L: linux-fbdev@vger.kernel.org
-M: Peter Jones
-S: Maintained
-F: drivers/video/efifb.c
-
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
@@ -2547,7 +2522,7 @@ S: Supported
F: drivers/scsi/gdt*
GENERIC GPIO I2C DRIVER
-M: Haavard Skinnemoen
+M: Haavard Skinnemoen
S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/i2c-gpio.h
@@ -2682,14 +2657,9 @@ S: Maintained
F: drivers/media/video/gspca/
HARDWARE MONITORING
-M: Jean Delvare
-M: Guenter Roeck
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
-T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
-T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
-S: Maintained
+S: Orphan
F: Documentation/hwmon/
F: drivers/hwmon/
F: include/linux/hwmon*.h
@@ -3075,27 +3045,16 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ixp2000/
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
M: Jeff Kirsher
M: Jesse Brandeburg
M: Bruce Allan
-M: Carolyn Wyborny
-M: Don Skidmore
-M: Greg Rose
-M: PJ Waskiewicz
M: Alex Duyck
+M: PJ Waskiewicz
M: John Ronciak
L: e1000-devel@lists.sourceforge.net
W: http://e1000.sourceforge.net/
S: Supported
-F: Documentation/networking/e100.txt
-F: Documentation/networking/e1000.txt
-F: Documentation/networking/e1000e.txt
-F: Documentation/networking/igb.txt
-F: Documentation/networking/igbvf.txt
-F: Documentation/networking/ixgb.txt
-F: Documentation/networking/ixgbe.txt
-F: Documentation/networking/ixgbevf.txt
F: drivers/net/e100.c
F: drivers/net/e1000/
F: drivers/net/e1000e/
@@ -3103,7 +3062,6 @@ F: drivers/net/igb/
F: drivers/net/igbvf/
F: drivers/net/ixgb/
F: drivers/net/ixgbe/
-F: drivers/net/ixgbevf/
INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
L: linux-wireless@vger.kernel.org
@@ -3164,7 +3122,7 @@ F: drivers/net/ioc3-eth.c
IOC3 SERIAL DRIVER
M: Pat Gefre
-L: linux-serial@vger.kernel.org
+L: linux-mips@linux-mips.org
S: Maintained
F: drivers/serial/ioc3_serial.c
@@ -3812,8 +3770,9 @@ W: http://www.syskonnect.com
S: Supported
MATROX FRAMEBUFFER DRIVER
+M: Petr Vandrovec
L: linux-fbdev@vger.kernel.org
-S: Orphan
+S: Maintained
F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
@@ -3937,8 +3896,10 @@ F: Documentation/serial/moxa-smartio
F: drivers/char/mxser.*
MSI LAPTOP SUPPORT
-M: Lee, Chun-Yi
+M: Lennart Poettering
L: platform-driver-x86@vger.kernel.org
+W: https://tango.0pointer.de/mailman/listinfo/s270-linux
+W: http://0pointer.de/lennart/tchibo.html
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@@ -3955,10 +3916,8 @@ S: Supported
F: drivers/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
-M: Chris Ball
+S: Orphan
L: linux-mmc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
-S: Maintained
F: drivers/mmc/
F: include/linux/mmc/
@@ -3980,7 +3939,7 @@ F: drivers/char/isicom.c
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M: Felipe Balbi
+M: Felipe Balbi
L: linux-usb@vger.kernel.org
T: git git://gitorious.org/usb/usb.git
S: Maintained
@@ -4000,8 +3959,8 @@ S: Maintained
F: drivers/net/natsemi.c
NCP FILESYSTEM
-M: Petr Vandrovec
-S: Odd Fixes
+M: Petr Vandrovec
+S: Maintained
F: fs/ncpfs/
NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
@@ -4278,7 +4237,7 @@ S: Maintained
F: drivers/char/hw_random/omap-rng.c
OMAP USB SUPPORT
-M: Felipe Balbi
+M: Felipe Balbi
M: David Brownell
L: linux-usb@vger.kernel.org
L: linux-omap@vger.kernel.org
@@ -4807,15 +4766,6 @@ F: fs/qnx4/
F: include/linux/qnx4_fs.h
F: include/linux/qnxtypes.h
-RADOS BLOCK DEVICE (RBD)
-F: include/linux/qnxtypes.h
-M: Yehuda Sadeh
-M: Sage Weil
-M: ceph-devel@vger.kernel.org
-S: Supported
-F: drivers/block/rbd.c
-F: drivers/block/rbd_types.h
-
RADEON FRAMEBUFFER DISPLAY DRIVER
M: Benjamin Herrenschmidt
L: linux-fbdev@vger.kernel.org
@@ -5041,12 +4991,6 @@ F: drivers/media/common/saa7146*
F: drivers/media/video/*7146*
F: include/media/*7146*
-SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Supported
-F: sound/soc/s3c24xx
-
TLG2300 VIDEO4LINUX-2 DRIVER
M: Huang Shijie
M: Kang Yong
@@ -5144,10 +5088,8 @@ S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-M: Chris Ball
+S: Orphan
L: linux-mmc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
-S: Maintained
F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
@@ -6489,10 +6431,8 @@ F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS
M: Mark Brown
M: Ian Lartey
-M: Dimitris Papastamos
-T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
+W: http://opensource.wolfsonmicro.com/node/8
S: Supported
F: Documentation/hwmon/wm83??
F: drivers/leds/leds-wm83*.c
diff --git a/trunk/Makefile b/trunk/Makefile
index d3c10719bbbd..92ab33f16cf0 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION =
-NAME = Flesh-Eating Bats with Fangs
+EXTRAVERSION = -rc4
+NAME = Sheep on Meth
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -568,12 +568,6 @@ endif
ifdef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -pg
-ifdef CONFIG_DYNAMIC_FTRACE
- ifdef CONFIG_HAVE_C_RECORDMCOUNT
- BUILD_C_RECORDMCOUNT := y
- export BUILD_C_RECORDMCOUNT
- endif
-endif
endif
# We trigger additional mismatches with less inlining
@@ -597,11 +591,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# But warn user when we do so
warn-assign = \
diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig
index 53d7f619a1b9..4877a8c8ee16 100644
--- a/trunk/arch/Kconfig
+++ b/trunk/arch/Kconfig
@@ -32,9 +32,8 @@ config HAVE_OPROFILE
config KPROBES
bool "Kprobes"
- depends on MODULES
+ depends on KALLSYMS && MODULES
depends on HAVE_KPROBES
- select KALLSYMS
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@@ -46,6 +45,7 @@ config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
+ select KALLSYMS_ALL
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
@@ -158,7 +158,4 @@ config HAVE_PERF_EVENTS_NMI
subsystem. Also has support for calculating CPU cycle events
to determine how many clock cycles in a given period.
-config HAVE_ARCH_JUMP_LABEL
- bool
-
source "kernel/gcov/Kconfig"
diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig
index d04ccd73af45..b9647bb66d13 100644
--- a/trunk/arch/alpha/Kconfig
+++ b/trunk/arch/alpha/Kconfig
@@ -9,7 +9,6 @@ config ALPHA
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
help
diff --git a/trunk/arch/alpha/include/asm/cacheflush.h b/trunk/arch/alpha/include/asm/cacheflush.h
index 012f1243b1c1..01d71e1c8a9e 100644
--- a/trunk/arch/alpha/include/asm/cacheflush.h
+++ b/trunk/arch/alpha/include/asm/cacheflush.h
@@ -43,8 +43,6 @@ extern void smp_imb(void);
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
#ifndef CONFIG_SMP
-#include
-
extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/trunk/arch/alpha/include/asm/perf_event.h b/trunk/arch/alpha/include/asm/perf_event.h
index fe792ca818f6..4157cd3c44a9 100644
--- a/trunk/arch/alpha/include/asm/perf_event.h
+++ b/trunk/arch/alpha/include/asm/perf_event.h
@@ -1,6 +1,11 @@
#ifndef __ASM_ALPHA_PERF_EVENT_H
#define __ASM_ALPHA_PERF_EVENT_H
+/* Alpha only supports software events through this interface. */
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
#else
diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h
index 058937bf5a77..804e5311c841 100644
--- a/trunk/arch/alpha/include/asm/unistd.h
+++ b/trunk/arch/alpha/include/asm/unistd.h
@@ -449,13 +449,10 @@
#define __NR_pwritev 491
#define __NR_rt_tgsigqueueinfo 492
#define __NR_perf_event_open 493
-#define __NR_fanotify_init 494
-#define __NR_fanotify_mark 495
-#define __NR_prlimit64 496
#ifdef __KERNEL__
-#define NR_SYSCALLS 497
+#define NR_SYSCALLS 494
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
@@ -466,7 +463,6 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
/* "Conditional" syscalls. What we want is
diff --git a/trunk/arch/alpha/kernel/entry.S b/trunk/arch/alpha/kernel/entry.S
index 6d159cee5f2f..b45d913a51c3 100644
--- a/trunk/arch/alpha/kernel/entry.S
+++ b/trunk/arch/alpha/kernel/entry.S
@@ -73,6 +73,8 @@
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
+ ldq $0, 0($sp); \
+ ldq $1, 8($sp); \
99:; \
ldq $19, 72($sp); \
ldq $20, 80($sp); \
@@ -314,24 +316,19 @@ ret_from_sys_call:
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
- beq $0, ret_to_kernel
-ret_to_user:
+ beq $0, restore_all
+ret_from_reschedule:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
lda $16, 7
call_pal PAL_swpipl
ldl $5, TI_FLAGS($8)
and $5, _TIF_WORK_MASK, $2
- bne $2, work_pending
+ bne $5, work_pending
restore_all:
RESTORE_ALL
call_pal PAL_rti
-ret_to_kernel:
- lda $16, 7
- call_pal PAL_swpipl
- br restore_all
-
.align 3
$syscall_error:
/*
@@ -366,7 +363,7 @@ $ret_success:
* $8: current.
* $19: The old syscall number, or zero if this is not a return
* from a syscall that errored and is possibly restartable.
- * $20: The old a3 value
+ * $20: Error indication.
*/
.align 4
@@ -395,18 +392,12 @@ $work_resched:
$work_notifysig:
mov $sp, $16
- bsr $1, do_switch_stack
+ br $1, do_switch_stack
mov $sp, $17
mov $5, $18
- mov $19, $9 /* save old syscall number */
- mov $20, $10 /* save old a3 */
- and $5, _TIF_SIGPENDING, $2
- cmovne $2, 0, $9 /* we don't want double syscall restarts */
jsr $26, do_notify_resume
- mov $9, $19
- mov $10, $20
bsr $1, undo_switch_stack
- br ret_to_user
+ br restore_all
.end work_pending
/*
@@ -439,7 +430,6 @@ strace:
beq $1, 1f
ldq $27, 0($2)
1: jsr $26, ($27), sys_gettimeofday
-ret_from_straced:
ldgp $gp, 0($26)
/* check return.. */
@@ -660,7 +650,7 @@ kernel_thread:
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($sp) /* $0 */
- br ret_to_kernel
+ br restore_all
.end kernel_thread
/*
@@ -767,15 +757,11 @@ sys_vfork:
.ent sys_sigreturn
sys_sigreturn:
.prologue 0
- lda $9, ret_from_straced
- cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_sigreturn
- bne $9, 1f
- jsr $26, syscall_trace
-1: br $1, undo_switch_stack
+ br $1, undo_switch_stack
br ret_from_sys_call
.end sys_sigreturn
@@ -784,18 +770,46 @@ sys_sigreturn:
.ent sys_rt_sigreturn
sys_rt_sigreturn:
.prologue 0
- lda $9, ret_from_straced
- cmpult $26, $9, $9
mov $sp, $17
lda $18, -SWITCH_STACK_SIZE($sp)
lda $sp, -SWITCH_STACK_SIZE($sp)
jsr $26, do_rt_sigreturn
- bne $9, 1f
- jsr $26, syscall_trace
-1: br $1, undo_switch_stack
+ br $1, undo_switch_stack
br ret_from_sys_call
.end sys_rt_sigreturn
+ .align 4
+ .globl sys_sigsuspend
+ .ent sys_sigsuspend
+sys_sigsuspend:
+ .prologue 0
+ mov $sp, $17
+ br $1, do_switch_stack
+ mov $sp, $18
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_sigsuspend
+
+ .align 4
+ .globl sys_rt_sigsuspend
+ .ent sys_rt_sigsuspend
+sys_rt_sigsuspend:
+ .prologue 0
+ mov $sp, $18
+ br $1, do_switch_stack
+ mov $sp, $19
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_rt_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_rt_sigsuspend
+
.align 4
.globl sys_sethae
.ent sys_sethae
@@ -914,6 +928,15 @@ sys_execve:
jmp $31, do_sys_execve
.end sys_execve
+ .align 4
+ .globl osf_sigprocmask
+ .ent osf_sigprocmask
+osf_sigprocmask:
+ .prologue 0
+ mov $sp, $18
+ jmp $31, sys_osf_sigprocmask
+.end osf_sigprocmask
+
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall
diff --git a/trunk/arch/alpha/kernel/err_ev6.c b/trunk/arch/alpha/kernel/err_ev6.c
index 253cf1a87481..8ca6345bf131 100644
--- a/trunk/arch/alpha/kernel/err_ev6.c
+++ b/trunk/arch/alpha/kernel/err_ev6.c
@@ -90,13 +90,11 @@ static int
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
u64 c_stat, u64 c_sts, int print)
{
- static const char * const sourcename[] = {
- "UNKNOWN", "UNKNOWN", "UNKNOWN",
- "MEMORY", "BCACHE", "DCACHE",
- "BCACHE PROBE", "BCACHE PROBE"
- };
- static const char * const streamname[] = { "D", "I" };
- static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
+ char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "MEMORY", "BCACHE", "DCACHE",
+ "BCACHE PROBE", "BCACHE PROBE" };
+ char *streamname[] = { "D", "I" };
+ char *bitsname[] = { "SINGLE", "DOUBLE" };
int status = MCHK_DISPOSITION_REPORT;
int source = -1, stream = -1, bits = -1;
diff --git a/trunk/arch/alpha/kernel/err_marvel.c b/trunk/arch/alpha/kernel/err_marvel.c
index 648ae88aeb8a..5c905aaaeccd 100644
--- a/trunk/arch/alpha/kernel/err_marvel.c
+++ b/trunk/arch/alpha/kernel/err_marvel.c
@@ -589,23 +589,22 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
static void
marvel_print_pox_trans_sum(u64 trans_sum)
{
- static const char * const pcix_cmd[] = {
- "Interrupt Acknowledge",
- "Special Cycle",
- "I/O Read",
- "I/O Write",
- "Reserved",
- "Reserved / Device ID Message",
- "Memory Read",
- "Memory Write",
- "Reserved / Alias to Memory Read Block",
- "Reserved / Alias to Memory Write Block",
- "Configuration Read",
- "Configuration Write",
- "Memory Read Multiple / Split Completion",
- "Dual Address Cycle",
- "Memory Read Line / Memory Read Block",
- "Memory Write and Invalidate / Memory Write Block"
+ char *pcix_cmd[] = { "Interrupt Acknowledge",
+ "Special Cycle",
+ "I/O Read",
+ "I/O Write",
+ "Reserved",
+ "Reserved / Device ID Message",
+ "Memory Read",
+ "Memory Write",
+ "Reserved / Alias to Memory Read Block",
+ "Reserved / Alias to Memory Write Block",
+ "Configuration Read",
+ "Configuration Write",
+ "Memory Read Multiple / Split Completion",
+ "Dual Address Cycle",
+ "Memory Read Line / Memory Read Block",
+ "Memory Write and Invalidate / Memory Write Block"
};
#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
diff --git a/trunk/arch/alpha/kernel/err_titan.c b/trunk/arch/alpha/kernel/err_titan.c
index c3b3781a03de..f7ed97ce0dfd 100644
--- a/trunk/arch/alpha/kernel/err_titan.c
+++ b/trunk/arch/alpha/kernel/err_titan.c
@@ -75,12 +75,8 @@ titan_parse_p_serror(int which, u64 serror, int print)
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- static const char * const serror_src[] = {
- "GPCI", "APCI", "AGP HP", "AGP LP"
- };
- static const char * const serror_cmd[] = {
- "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
- };
+ char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
+ char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
#endif /* CONFIG_VERBOSE_MCHECK */
#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -144,15 +140,14 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
int status = MCHK_DISPOSITION_REPORT;
#ifdef CONFIG_VERBOSE_MCHECK
- static const char * const perror_cmd[] = {
- "Interrupt Acknowledge", "Special Cycle",
- "I/O Read", "I/O Write",
- "Reserved", "Reserved",
- "Memory Read", "Memory Write",
- "Reserved", "Reserved",
- "Configuration Read", "Configuration Write",
- "Memory Read Multiple", "Dual Address Cycle",
- "Memory Read Line", "Memory Write and Invalidate"
+ char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
+ "I/O Read", "I/O Write",
+ "Reserved", "Reserved",
+ "Memory Read", "Memory Write",
+ "Reserved", "Reserved",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line","Memory Write and Invalidate"
};
#endif /* CONFIG_VERBOSE_MCHECK */
@@ -278,11 +273,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
int cmd, len;
unsigned long addr;
- static const char * const agperror_cmd[] = {
- "Read (low-priority)", "Read (high-priority)",
- "Write (low-priority)", "Write (high-priority)",
- "Reserved", "Reserved",
- "Flush", "Fence"
+ char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
+ "Write (low-priority)",
+ "Write (high-priority)",
+ "Reserved", "Reserved",
+ "Flush", "Fence"
};
#endif /* CONFIG_VERBOSE_MCHECK */
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 547e8b84b2f7..5d1e6d6ce684 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -68,6 +69,7 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
{
struct mm_struct *mm;
+ lock_kernel();
mm = current->mm;
mm->end_code = bss_start + bss_len;
mm->start_brk = bss_start + bss_len;
@@ -76,6 +78,7 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
printk("set_program_attributes(%lx %lx %lx %lx)\n",
text_start, text_len, bss_start, bss_len);
#endif
+ unlock_kernel();
return 0;
}
@@ -514,6 +517,7 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
long error;
int __user *min_buf_size_ptr;
+ lock_kernel();
switch (code) {
case PL_SET:
if (get_user(error, &args->set.nbytes))
@@ -543,6 +547,7 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
error = -EOPNOTSUPP;
break;
};
+ unlock_kernel();
return error;
}
@@ -589,7 +594,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
{
- const char *sysinfo_table[] = {
+ char *sysinfo_table[] = {
utsname()->sysname,
utsname()->nodename,
utsname()->release,
@@ -601,7 +606,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
"dummy", /* secure RPC domain */
};
unsigned long offset;
- const char *res;
+ char *res;
long len, err = -EINVAL;
offset = command-1;
diff --git a/trunk/arch/alpha/kernel/pci-sysfs.c b/trunk/arch/alpha/kernel/pci-sysfs.c
index b899e95f79fd..738fc824e2ea 100644
--- a/trunk/arch/alpha/kernel/pci-sysfs.c
+++ b/trunk/arch/alpha/kernel/pci-sysfs.c
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
struct device, kobj));
- struct resource *res = attr->private;
+ struct resource *res = (struct resource *)attr->private;
enum pci_mmap_state mmap_type;
struct pci_bus_region bar;
int i;
diff --git a/trunk/arch/alpha/kernel/perf_event.c b/trunk/arch/alpha/kernel/perf_event.c
index 1cc49683fb69..85d8e4f58c83 100644
--- a/trunk/arch/alpha/kernel/perf_event.c
+++ b/trunk/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+ delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
@@ -402,13 +402,14 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
- if (cpuc->current_idx[j] == PMC_NO_INDEX) {
- alpha_perf_event_set_period(pe, hwc, idx);
- cpuc->current_idx[j] = idx;
+ if (cpuc->current_idx[j] != PMC_NO_INDEX) {
+ cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+ continue;
}
- if (!(hwc->state & PERF_HES_STOPPED))
- cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+ alpha_perf_event_set_period(pe, hwc, idx);
+ cpuc->current_idx[j] = idx;
+ cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
@@ -419,13 +420,12 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
-static int alpha_pmu_add(struct perf_event *event, int flags)
+static int alpha_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
- unsigned long irq_flags;
+ unsigned long flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
- perf_pmu_disable(event->pmu);
- local_irq_save(irq_flags);
+ perf_disable();
+ local_irq_save(flags);
/* Default to error to be returned */
ret = -EAGAIN;
@@ -455,12 +455,8 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
}
}
- hwc->state = PERF_HES_UPTODATE;
- if (!(flags & PERF_EF_START))
- hwc->state |= PERF_HES_STOPPED;
-
- local_irq_restore(irq_flags);
- perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+ perf_enable();
return ret;
}
@@ -471,15 +467,15 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
-static void alpha_pmu_del(struct perf_event *event, int flags)
+static void alpha_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- unsigned long irq_flags;
+ unsigned long flags;
int j;
- perf_pmu_disable(event->pmu);
- local_irq_save(irq_flags);
+ perf_disable();
+ local_irq_save(flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
@@ -505,8 +501,8 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
}
}
- local_irq_restore(irq_flags);
- perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+ perf_enable();
}
@@ -518,44 +514,13 @@ static void alpha_pmu_read(struct perf_event *event)
}
-static void alpha_pmu_stop(struct perf_event *event, int flags)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!(hwc->state & PERF_HES_STOPPED)) {
- cpuc->idx_mask &= ~(1UL<<hwc->idx);
- hwc->state |= PERF_HES_STOPPED;
- }
-
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- alpha_perf_event_update(event, hwc, hwc->idx, 0);
- hwc->state |= PERF_HES_UPTODATE;
- }
-
- if (cpuc->enabled)
- wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
-}
-
-
-static void alpha_pmu_start(struct perf_event *event, int flags)
+static void alpha_pmu_unthrottle(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
- return;
-
- if (flags & PERF_EF_RELOAD) {
- WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
- alpha_perf_event_set_period(event, hwc, hwc->idx);
- }
-
- hwc->state = 0;
-
cpuc->idx_mask |= 1UL<<hwc->idx;
- if (cpuc->enabled)
- wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+ wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
@@ -677,36 +642,39 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
+static const struct pmu pmu = {
+ .enable = alpha_pmu_enable,
+ .disable = alpha_pmu_disable,
+ .read = alpha_pmu_read,
+ .unthrottle = alpha_pmu_unthrottle,
+};
+
+
/*
* Main entry point to initialise a HW performance event.
*/
-static int alpha_pmu_event_init(struct perf_event *event)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err;
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
- return -ENOENT;
- }
-
if (!alpha_pmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
- return err;
+ if (err)
+ return ERR_PTR(err);
+
+ return &pmu;
}
+
+
/*
* Main entry point - enable HW performance counters.
*/
-static void alpha_pmu_enable(struct pmu *pmu)
+void hw_perf_enable(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -732,7 +700,7 @@ static void alpha_pmu_enable(struct pmu *pmu)
* Main entry point - disable HW performance counters.
*/
-static void alpha_pmu_disable(struct pmu *pmu)
+void hw_perf_disable(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -745,17 +713,6 @@ static void alpha_pmu_disable(struct pmu *pmu)
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
-static struct pmu pmu = {
- .pmu_enable = alpha_pmu_enable,
- .pmu_disable = alpha_pmu_disable,
- .event_init = alpha_pmu_event_init,
- .add = alpha_pmu_add,
- .del = alpha_pmu_del,
- .start = alpha_pmu_start,
- .stop = alpha_pmu_stop,
- .read = alpha_pmu_read,
-};
-
/*
* Main entry point - don't know when this is called but it
@@ -809,7 +766,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
- if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
+ if (unlikely(la_ptr >= perf_max_events)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -850,7 +807,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
- alpha_pmu_stop(event, 0);
+ cpuc->idx_mask &= ~(1UL<<idx);
@@ -880,7 +837,6 @@ void __init init_hw_perf_events(void)
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
-
- perf_pmu_register(&pmu);
+ perf_max_events = alpha_pmu->num_pmcs;
}
diff --git a/trunk/arch/alpha/kernel/process.c b/trunk/arch/alpha/kernel/process.c
index 3ec35066f1dc..842dba308eab 100644
--- a/trunk/arch/alpha/kernel/process.c
+++ b/trunk/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
- dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
+ dest[30] = rdusp();
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid
diff --git a/trunk/arch/alpha/kernel/signal.c b/trunk/arch/alpha/kernel/signal.c
index 6f7feb5db271..0932dbb1ef8e 100644
--- a/trunk/arch/alpha/kernel/signal.c
+++ b/trunk/arch/alpha/kernel/signal.c
@@ -41,20 +41,46 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
+ *
+ * how:
+ * 1 - SIG_BLOCK
+ * 2 - SIG_UNBLOCK
+ * 3 - SIG_SETMASK
+ *
+ * We change the range to -1 .. 1 in order to let gcc easily
+ * use the conditional move instructions.
+ *
+ * Note that we don't need to acquire the kernel lock for SMP
+ * operation, as all of this is local to this thread.
*/
-SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
+SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
+ struct pt_regs *, regs)
{
- sigset_t oldmask;
- sigset_t mask;
- unsigned long res;
-
- siginitset(&mask, newmask & _BLOCKABLE);
- res = sigprocmask(how, &mask, &oldmask);
- if (!res) {
- force_successful_syscall_return();
- res = oldmask.sig[0];
+ unsigned long oldmask = -EINVAL;
+
+ if ((unsigned long)how-1 <= 2) {
+ long sign = how-2; /* -1 .. 1 */
+ unsigned long block, unblock;
+
+ newmask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ oldmask = current->blocked.sig[0];
+
+ unblock = oldmask & ~newmask;
+ block = oldmask | newmask;
+ if (!sign)
+ block = unblock;
+ if (sign <= 0)
+ newmask = block;
+ if (_NSIG_WORDS > 1 && sign > 0)
+ sigemptyset(&current->blocked);
+ current->blocked.sig[0] = newmask;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->r0 = 0; /* special no error return */
}
- return res;
+ return oldmask;
}
SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -68,9 +94,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags))
return -EFAULT;
+ __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
@@ -80,9 +106,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
return -EFAULT;
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -118,7 +144,8 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
-SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
+asmlinkage int
+do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
@@ -127,6 +154,41 @@ SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+ /* Indicate EINTR on return from any possible signal handler,
+ which will not come back through here, but via sigreturn. */
+ regs->r0 = EINTR;
+ regs->r19 = 1;
+
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
+ struct pt_regs *regs, struct switch_stack *sw)
+{
+ sigset_t set;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+ if (copy_from_user(&set, uset, sizeof(set)))
+ return -EFAULT;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->saved_sigmask = current->blocked;
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /* Indicate EINTR on return from any possible signal handler,
+ which will not come back through here, but via sigreturn. */
+ regs->r0 = EINTR;
+ regs->r19 = 1;
+
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -177,8 +239,6 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long usp;
long i, err = __get_user(regs->pc, &sc->sc_pc);
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
sw->r26 = (unsigned long) ret_from_sys_call;
err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -531,6 +591,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
regs->r0 = EINTR;
break;
}
diff --git a/trunk/arch/alpha/kernel/srm_env.c b/trunk/arch/alpha/kernel/srm_env.c
index f0df3fbd8402..4afc1a1e2e5a 100644
--- a/trunk/arch/alpha/kernel/srm_env.c
+++ b/trunk/arch/alpha/kernel/srm_env.c
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
srm_env_t *entry;
char *page;
- entry = m->private;
+ entry = (srm_env_t *)m->private;
page = (char *)__get_free_page(GFP_USER);
if (!page)
return -ENOMEM;
diff --git a/trunk/arch/alpha/kernel/systbls.S b/trunk/arch/alpha/kernel/systbls.S
index a6a1de9db16f..09acb786e72b 100644
--- a/trunk/arch/alpha/kernel/systbls.S
+++ b/trunk/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
- .quad sys_osf_sigprocmask
+ .quad osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct
@@ -512,9 +512,6 @@ sys_call_table:
.quad sys_pwritev
.quad sys_rt_tgsigqueueinfo
.quad sys_perf_event_open
- .quad sys_fanotify_init
- .quad sys_fanotify_mark /* 495 */
- .quad sys_prlimit64
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/trunk/arch/alpha/kernel/time.c b/trunk/arch/alpha/kernel/time.c
index 0f1d8493cfca..eacceb26d9c8 100644
--- a/trunk/arch/alpha/kernel/time.c
+++ b/trunk/arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
#include
#include
#include
-#include
+#include
#include
#include
@@ -83,25 +83,25 @@ static struct {
unsigned long est_cycle_freq;
-#ifdef CONFIG_IRQ_WORK
+#ifdef CONFIG_PERF_EVENTS
-DEFINE_PER_CPU(u8, irq_work_pending);
+DEFINE_PER_CPU(u8, perf_event_pending);
-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
+#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
+#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-void set_irq_work_pending(void)
+void set_perf_event_pending(void)
{
- set_irq_work_pending_flag();
+ set_perf_event_pending_flag();
}
-#else /* CONFIG_IRQ_WORK */
+#else /* CONFIG_PERF_EVENTS */
-#define test_irq_work_pending() 0
-#define clear_irq_work_pending()
+#define test_perf_event_pending() 0
+#define clear_perf_event_pending()
-#endif /* CONFIG_IRQ_WORK */
+#endif /* CONFIG_PERF_EVENTS */
static inline __u32 rpcc(void)
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
write_sequnlock(&xtime_lock);
- if (test_irq_work_pending()) {
- clear_irq_work_pending();
- irq_work_run();
- }
-
#ifndef CONFIG_SMP
while (nticks--)
update_process_times(user_mode(get_irq_regs()));
#endif
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
+ }
+
return IRQ_HANDLED;
}
diff --git a/trunk/arch/alpha/kernel/traps.c b/trunk/arch/alpha/kernel/traps.c
index 0414e021a91c..b14f015008ad 100644
--- a/trunk/arch/alpha/kernel/traps.c
+++ b/trunk/arch/alpha/kernel/traps.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -622,6 +623,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
return;
}
+ lock_kernel();
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
do_exit(SIGSEGV);
@@ -644,6 +646,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
* Yikes! No one to forward the exception to.
* Since the registers are in a weird format, dump them ourselves.
*/
+ lock_kernel();
printk("%s(%d): unhandled unaligned exception\n",
current->comm, task_pid_nr(current));
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 9103904b3dab..553b7cf17bfb 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -23,7 +23,6 @@ config ARM
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -272,6 +271,7 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -1051,32 +1051,6 @@ config ARM_ERRATA_460075
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
-config ARM_ERRATA_742230
- bool "ARM errata: DMB operation may be faulty"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for the 742230 Cortex-A9
- (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
- between two write operations may not ensure the correct visibility
- ordering of the two writes. This workaround sets a specific bit in
- the diagnostic register of the Cortex-A9 which causes the DMB
- instruction to behave as a DSB, ensuring the correct behaviour of
- the two writes.
-
-config ARM_ERRATA_742231
- bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
- depends on CPU_V7 && SMP
- help
- This option enables the workaround for the 742231 Cortex-A9
- (r2p0..r2p2) erratum. Under certain conditions, specific to the
- Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
- accessing some data located in the same cache line, may get corrupted
- data due to bad handling of the address hazard when the line gets
- replaced from one of the CPUs at the same time as another CPU is
- accessing it. This workaround sets specific bits in the diagnostic
- register of the Cortex-A9 which reduces the linefill issuing
- capabilities of the processor.
-
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1102,20 +1076,6 @@ config ARM_ERRATA_720789
invalidated are not, resulting in an incoherency in the system page
tables. The workaround changes the TLB flushing routines to invalidate
entries regardless of the ASID.
-
-config ARM_ERRATA_743622
- bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
- depends on CPU_V7
- help
- This option enables the workaround for the 743622 Cortex-A9
- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
- optimisation in the Cortex-A9 Store Buffer may lead to data
- corruption. This workaround sets a specific bit in the diagnostic
- register of the Cortex-A9 which disables the Store Buffer
- optimisation, preventing the defect from occurring. This has no
- visible impact on the overall performance or power consumption of the
- processor.
-
endmenu
source "arch/arm/common/Kconfig"
diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile
index 65a7c1c588a9..b23f6bc46cfa 100644
--- a/trunk/arch/arm/boot/compressed/Makefile
+++ b/trunk/arch/arm/boot/compressed/Makefile
@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
@sed "$(SEDFLAGS)" < $< > $@
diff --git a/trunk/arch/arm/common/it8152.c b/trunk/arch/arm/common/it8152.c
index 1bec96e85196..7974baacafce 100644
--- a/trunk/arch/arm/common/it8152.c
+++ b/trunk/arch/arm/common/it8152.c
@@ -271,14 +271,6 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (mask >= PHYS_OFFSET + SZ_64M - 1)
- return 0;
-
- return -EIO;
-}
-
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;
diff --git a/trunk/arch/arm/include/asm/perf_event.h b/trunk/arch/arm/include/asm/perf_event.h
index c4aa4e8c6af9..b5799a3b7117 100644
--- a/trunk/arch/arm/include/asm/perf_event.h
+++ b/trunk/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,18 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts disabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
/* ARM performance counters start from 1 (in the cp15 accesses) so use the
* same indexes here for consistency. */
#define PERF_EVENT_INDEX_OFFSET 1
diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h
index e90b167ea848..ab68cf1ef80f 100644
--- a/trunk/arch/arm/include/asm/pgtable.h
+++ b/trunk/arch/arm/include/asm/pgtable.h
@@ -317,10 +317,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/trunk/arch/arm/kernel/entry-common.S b/trunk/arch/arm/kernel/entry-common.S
index 7885722bdf4e..f05a35a59694 100644
--- a/trunk/arch/arm/kernel/entry-common.S
+++ b/trunk/arch/arm/kernel/entry-common.S
@@ -48,8 +48,6 @@ work_pending:
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
- tst r1, #_TIF_SIGPENDING @ delivering a signal?
- movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again
@@ -420,13 +418,11 @@ ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
- mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
- mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
diff --git a/trunk/arch/arm/kernel/kprobes-decode.c b/trunk/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8bccbfa693ff 100644
--- a/trunk/arch/arm/kernel/kprobes-decode.c
+++ b/trunk/arch/arm/kernel/kprobes-decode.c
@@ -1162,12 +1162,11 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
/*
* MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
- * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
+ * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
* ALU op with S bit and Rd == 15 :
* cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
*/
- if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
- (insn & 0x0ff00000) == 0x03400000 || /* Undef */
+ if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */
(insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
return INSN_REJECTED;
@@ -1178,7 +1177,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
* *S (bit 20) updates condition codes
* ADC/SBC/RSC reads the C flag
*/
- insn &= 0xffff0fff; /* Rd = r0 */
+ insn &= 0xfff00fff; /* Rn = r0, Rd = r0 */
asi->insn[0] = insn;
asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c
index 49643b1467e6..ecbb0288e5dd 100644
--- a/trunk/arch/arm/kernel/perf_event.c
+++ b/trunk/arch/arm/kernel/perf_event.c
@@ -123,12 +123,6 @@ armpmu_get_max_events(void)
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-int perf_num_counters(void)
-{
- return armpmu_get_max_events();
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) \
@@ -227,56 +221,46 @@ armpmu_event_update(struct perf_event *event,
}
static void
-armpmu_read(struct perf_event *event)
+armpmu_disable(struct perf_event *event)
{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
- /* Don't read disabled counters! */
- if (hwc->idx < 0)
- return;
+ WARN_ON(idx < 0);
- armpmu_event_update(event, hwc, hwc->idx);
+ clear_bit(idx, cpuc->active_mask);
+ armpmu->disable(hwc, idx);
+
+ barrier();
+
+ armpmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
}
static void
-armpmu_stop(struct perf_event *event, int flags)
+armpmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- if (!armpmu)
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
return;
- /*
- * ARM pmu always has to update the counter, so ignore
- * PERF_EF_UPDATE, see comments in armpmu_start().
- */
- if (!(hwc->state & PERF_HES_STOPPED)) {
- armpmu->disable(hwc, hwc->idx);
- barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
- hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
- }
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
-armpmu_start(struct perf_event *event, int flags)
+armpmu_unthrottle(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- if (!armpmu)
- return;
-
- /*
- * ARM pmu always has to reprogram the period, so ignore
- * PERF_EF_RELOAD, see the comment below.
- */
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
- hwc->state = 0;
/*
* Set the period again. Some counters can't be stopped, so when we
- * were stopped we simply disabled the IRQ source and the counter
+ * were throttled we simply disabled the IRQ source and the counter
* may have been left counting. If we don't do this step then we may
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
@@ -285,33 +269,14 @@ armpmu_start(struct perf_event *event, int flags)
armpmu->enable(hwc, hwc->idx);
}
-static void
-armpmu_del(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- WARN_ON(idx < 0);
-
- clear_bit(idx, cpuc->active_mask);
- armpmu_stop(event, PERF_EF_UPDATE);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
-
- perf_event_update_userpage(event);
-}
-
static int
-armpmu_add(struct perf_event *event, int flags)
+armpmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
- perf_pmu_disable(event->pmu);
-
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(cpuc, hwc);
if (idx < 0) {
@@ -328,19 +293,25 @@ armpmu_add(struct perf_event *event, int flags)
cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- if (flags & PERF_EF_START)
- armpmu_start(event, PERF_EF_RELOAD);
+ /* Set the period for the event. */
+ armpmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ armpmu->enable(hwc, idx);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
- perf_pmu_enable(event->pmu);
return err;
}
-static struct pmu pmu;
+static struct pmu pmu = {
+ .enable = armpmu_enable,
+ .disable = armpmu_disable,
+ .unthrottle = armpmu_unthrottle,
+ .read = armpmu_read,
+};
static int
validate_event(struct cpu_hw_events *cpuc,
@@ -520,29 +491,20 @@ __hw_perf_event_init(struct perf_event *event)
return err;
}
-static int armpmu_event_init(struct perf_event *event)
+const struct pmu *
+hw_perf_event_init(struct perf_event *event)
{
int err = 0;
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
- return -ENOENT;
- }
-
if (!armpmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > armpmu->num_events) {
+ if (atomic_read(&active_events) > perf_max_events) {
atomic_dec(&active_events);
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
}
mutex_lock(&pmu_reserve_mutex);
@@ -556,16 +518,17 @@ static int armpmu_event_init(struct perf_event *event)
}
if (err)
- return err;
+ return ERR_PTR(err);
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
- return err;
+ return err ? ERR_PTR(err) : &pmu;
}
-static void armpmu_enable(struct pmu *pmu)
+void
+hw_perf_enable(void)
{
/* Enable all of the perf events on hardware. */
int idx;
@@ -586,23 +549,13 @@ static void armpmu_enable(struct pmu *pmu)
armpmu->start();
}
-static void armpmu_disable(struct pmu *pmu)
+void
+hw_perf_disable(void)
{
if (armpmu)
armpmu->stop();
}
-static struct pmu pmu = {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
-};
-
/*
* ARMv6 Performance counter handling code.
*
@@ -1092,7 +1045,7 @@ armv6pmu_handle_irq(int irq_num,
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
- irq_work_run();
+ perf_event_do_pending();
return IRQ_HANDLED;
}
@@ -2068,7 +2021,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
- irq_work_run();
+ perf_event_do_pending();
return IRQ_HANDLED;
}
@@ -2436,7 +2389,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
armpmu->disable(hwc, idx);
}
- irq_work_run();
+ perf_event_do_pending();
/*
* Re-enable the PMU.
@@ -2763,7 +2716,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
armpmu->disable(hwc, idx);
}
- irq_work_run();
+ perf_event_do_pending();
/*
* Re-enable the PMU.
@@ -2980,12 +2933,14 @@ init_hw_perf_events(void)
armpmu = &armv6pmu;
memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
sizeof(armv6_perf_cache_map));
+ perf_max_events = armv6pmu.num_events;
break;
case 0xB020: /* ARM11mpcore */
armpmu = &armv6mpcore_pmu;
memcpy(armpmu_perf_cache_map,
armv6mpcore_perf_cache_map,
sizeof(armv6mpcore_perf_cache_map));
+ perf_max_events = armv6mpcore_pmu.num_events;
break;
case 0xC080: /* Cortex-A8 */
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2997,6 +2952,7 @@ init_hw_perf_events(void)
/* Reset PMNC and read the nb of CNTx counters
supported */
armv7pmu.num_events = armv7_reset_read_pmnc();
+ perf_max_events = armv7pmu.num_events;
break;
case 0xC090: /* Cortex-A9 */
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -3008,6 +2964,7 @@ init_hw_perf_events(void)
/* Reset PMNC and read the nb of CNTx counters
supported */
armv7pmu.num_events = armv7_reset_read_pmnc();
+ perf_max_events = armv7pmu.num_events;
break;
}
/* Intel CPUs [xscale]. */
@@ -3018,11 +2975,13 @@ init_hw_perf_events(void)
armpmu = &xscale1pmu;
memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale1pmu.num_events;
break;
case 2:
armpmu = &xscale2pmu;
memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale2pmu.num_events;
break;
}
}
@@ -3032,10 +2991,9 @@ init_hw_perf_events(void)
arm_pmu_names[armpmu->id], armpmu->num_events);
} else {
pr_info("no hardware support available\n");
+ perf_max_events = -1;
}
- perf_pmu_register(&pmu);
-
return 0;
}
arch_initcall(init_hw_perf_events);
@@ -3043,6 +3001,13 @@ arch_initcall(init_hw_perf_events);
/*
* Callchain handling code.
*/
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
/*
* The registers we're interested in are at the end of the variable
@@ -3074,7 +3039,7 @@ user_backtrace(struct frame_tail *tail,
if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
return NULL;
- perf_callchain_store(entry, buftail.lr);
+ callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
@@ -3086,11 +3051,16 @@ user_backtrace(struct frame_tail *tail,
return buftail.fp - 1;
}
-void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
struct frame_tail *tail;
+ callchain_store(entry, PERF_CONTEXT_USER);
+
+ if (!user_mode(regs))
+ regs = task_pt_regs(current);
tail = (struct frame_tail *)regs->ARM_fp - 1;
@@ -3108,18 +3078,56 @@ callchain_trace(struct stackframe *fr,
void *data)
{
struct perf_callchain_entry *entry = data;
- perf_callchain_store(entry, fr->pc);
+ callchain_store(entry, fr->pc);
return 0;
}
-void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
struct stackframe fr;
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr;
fr.pc = regs->ARM_pc;
walk_stackframe(&fr, callchain_trace, entry);
}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user)
+ perf_callchain_kernel(regs, entry);
+
+ if (current->mm)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
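perf_do_callchain() above records a context marker before each half of the chain so a consumer can tell kernel frames from user frames, and callchain_store() bounds the per-sample depth. A compact user-space sketch of that storage pattern; the marker values mirror perf's PERF_CONTEXT_* constants, but the types and depth bound are assumptions:

    #include <stdint.h>

    #define CTX_KERNEL 0xffffffffffffff80ULL  /* (u64)-128, as in perf */
    #define CTX_USER   0xfffffffffffffe00ULL  /* (u64)-512 */
    #define MAX_DEPTH  255                    /* assumed depth bound */

    struct chain { uint64_t nr; uint64_t ip[MAX_DEPTH]; };

    /* Bounded store, like callchain_store() above. */
    static void chain_push(struct chain *c, uint64_t ip)
    {
        if (c->nr < MAX_DEPTH)
            c->ip[c->nr++] = ip;
    }

    static void chain_record(struct chain *c, int user,
                             const uint64_t *pcs, int n)
    {
        chain_push(c, user ? CTX_USER : CTX_KERNEL);
        while (n-- > 0)
            chain_push(c, *pcs++);
    }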
diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
index 1276babf84d5..5e71ccd5e7d3 100644
--- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA21,
.scl_is_open_drain = 1,
- .udelay = 5, /* ~100 kHz */
+ .udelay = 2, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PB11,
.scl_is_open_drain = 1,
- .udelay = 5, /* ~100 kHz */
+ .udelay = 2, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi1_device = {
diff --git a/trunk/arch/arm/mach-at91/include/mach/system.h b/trunk/arch/arm/mach-at91/include/mach/system.h
index ee8db152592e..c80e090b3670 100644
--- a/trunk/arch/arm/mach-at91/include/mach/system.h
+++ b/trunk/arch/arm/mach-at91/include/mach/system.h
@@ -28,16 +28,17 @@
static inline void arch_idle(void)
{
+#ifndef CONFIG_DEBUG_KERNEL
/*
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-#ifndef CONFIG_CPU_ARM920T
+#else
/*
* Set the processor (CP15) into 'Wait for Interrupt' mode.
- * Post-RM9200 processors need this in conjunction with the above
- * to save power when idle.
+ * Unlike disabling the processor clock via the PMC (above)
+ * this allows the processor to be woken via JTAG.
*/
cpu_do_idle();
#endif
diff --git a/trunk/arch/arm/mach-bcmring/dma.c b/trunk/arch/arm/mach-bcmring/dma.c
index 77eb35c89cd0..29c0a911df26 100644
--- a/trunk/arch/arm/mach-bcmring/dma.c
+++ b/trunk/arch/arm/mach-bcmring/dma.c
@@ -691,7 +691,7 @@ int dma_init(void)
memset(&gDMA, 0, sizeof(gDMA));
- sema_init(&gDMA.lock, 0);
+ init_MUTEX_LOCKED(&gDMA.lock);
init_waitqueue_head(&gDMA.freeChannelQ);
/* Initialize the Hardware */
@@ -1574,7 +1574,7 @@ int dma_init_mem_map(DMA_MemMap_t *memMap)
{
memset(memMap, 0, sizeof(*memMap));
- sema_init(&memMap->lock, 1);
+ init_MUTEX(&memMap->lock);
return 0;
}
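The dma.c hunks swap sema_init() back to the pre-2.6.26 init_MUTEX helpers; the two spellings initialize the same counting semaphore. A small sketch of the equivalence (demo names only):

    #include <linux/semaphore.h>

    static struct semaphore demo_sem;

    static void demo_init(void)
    {
        /* init_MUTEX(&demo_sem) is the older spelling of: */
        sema_init(&demo_sem, 1);    /* count 1: first down() succeeds */

        /* init_MUTEX_LOCKED(&demo_sem) corresponds to: */
        sema_init(&demo_sem, 0);    /* count 0: down() blocks until up() */
    }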
diff --git a/trunk/arch/arm/mach-davinci/dm355.c b/trunk/arch/arm/mach-davinci/dm355.c
index 9be261beae7d..3d996b659ff4 100644
--- a/trunk/arch/arm/mach-davinci/dm355.c
+++ b/trunk/arch/arm/mach-davinci/dm355.c
@@ -769,7 +769,8 @@ static struct map_desc dm355_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- .type = MT_MEMORY_NONCACHED,
+ /* MT_MEMORY_NONCACHED requires supersection alignment */
+ .type = MT_DEVICE,
},
};
diff --git a/trunk/arch/arm/mach-davinci/dm365.c b/trunk/arch/arm/mach-davinci/dm365.c
index 7781e35daec3..6b6f4c643709 100644
--- a/trunk/arch/arm/mach-davinci/dm365.c
+++ b/trunk/arch/arm/mach-davinci/dm365.c
@@ -969,7 +969,8 @@ static struct map_desc dm365_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- .type = MT_MEMORY_NONCACHED,
+ /* MT_MEMORY_NONCACHED requires supersection alignment */
+ .type = MT_DEVICE,
},
};
diff --git a/trunk/arch/arm/mach-davinci/dm644x.c b/trunk/arch/arm/mach-davinci/dm644x.c
index 5e5b0a7831fb..40fec315c99a 100644
--- a/trunk/arch/arm/mach-davinci/dm644x.c
+++ b/trunk/arch/arm/mach-davinci/dm644x.c
@@ -653,7 +653,8 @@ static struct map_desc dm644x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
- .type = MT_MEMORY_NONCACHED,
+ /* MT_MEMORY_NONCACHED requires supersection alignment */
+ .type = MT_DEVICE,
},
};
diff --git a/trunk/arch/arm/mach-davinci/dm646x.c b/trunk/arch/arm/mach-davinci/dm646x.c
index 26e8a9c7f50b..e4a3df1872ac 100644
--- a/trunk/arch/arm/mach-davinci/dm646x.c
+++ b/trunk/arch/arm/mach-davinci/dm646x.c
@@ -737,7 +737,8 @@ static struct map_desc dm646x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
- .type = MT_MEMORY_NONCACHED,
+ /* MT_MEMORY_NONCACHED requires supersection alignment */
+ .type = MT_DEVICE,
},
};
diff --git a/trunk/arch/arm/mach-dove/include/mach/io.h b/trunk/arch/arm/mach-dove/include/mach/io.h
index eb4936ff90ad..3b3e4721ce2e 100644
--- a/trunk/arch/arm/mach-dove/include/mach/io.h
+++ b/trunk/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
#define IO_SPACE_LIMIT 0xffffffff
-#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
- DOVE_PCIE0_IO_VIRT_BASE))
-#define __mem_pci(a) (a)
+#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
+ DOVE_PCIE0_IO_VIRT_BASE))
+#define __mem_pci(a) (a)
#endif
diff --git a/trunk/arch/arm/mach-ep93xx/dma-m2p.c b/trunk/arch/arm/mach-ep93xx/dma-m2p.c
index a696d354b1f8..8904ca4e2e24 100644
--- a/trunk/arch/arm/mach-ep93xx/dma-m2p.c
+++ b/trunk/arch/arm/mach-ep93xx/dma-m2p.c
@@ -276,7 +276,7 @@ static void channel_disable(struct m2p_channel *ch)
v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
m2p_set_control(ch, v);
- while (m2p_channel_state(ch) >= STATE_ON)
+ while (m2p_channel_state(ch) == STATE_ON)
cpu_relax();
m2p_set_control(ch, 0x0);
diff --git a/trunk/arch/arm/mach-imx/Kconfig b/trunk/arch/arm/mach-imx/Kconfig
index 2f7e2728970d..c5c0369bb481 100644
--- a/trunk/arch/arm/mach-imx/Kconfig
+++ b/trunk/arch/arm/mach-imx/Kconfig
@@ -122,7 +122,6 @@ config MACH_CPUIMX27
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_NAND
- select MXC_ULPI if USB_ULPI
help
Include support for Eukrea CPUIMX27 platform. This includes
specific configurations for the module and its peripherals.
diff --git a/trunk/arch/arm/mach-imx/mach-cpuimx27.c b/trunk/arch/arm/mach-imx/mach-cpuimx27.c
index 6830afd1d2ba..339150ab0ea5 100644
--- a/trunk/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/trunk/arch/arm/mach-imx/mach-cpuimx27.c
@@ -259,7 +259,7 @@ static void __init eukrea_cpuimx27_init(void)
i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
- imx27_add_i2c_imx0(&cpuimx27_i2c1_data);
+ imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
diff --git a/trunk/arch/arm/mach-ixp4xx/common-pci.c b/trunk/arch/arm/mach-ixp4xx/common-pci.c
index 24498a932ba6..61cd4d64b985 100644
--- a/trunk/arch/arm/mach-ixp4xx/common-pci.c
+++ b/trunk/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,14 +503,6 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
}
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (mask >= SZ_64M - 1)
- return 0;
-
- return -EIO;
-}
-
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
diff --git a/trunk/arch/arm/mach-ixp4xx/include/mach/hardware.h b/trunk/arch/arm/mach-ixp4xx/include/mach/hardware.h
index 8138371c406e..f91ca6d4fbe8 100644
--- a/trunk/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/trunk/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,8 +26,6 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
-
#define pcibios_assign_all_busses() 1
/* Register locations and bits */
diff --git a/trunk/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/trunk/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 6e924b398919..93fc2ec95e76 100644
--- a/trunk/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/trunk/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/trunk/arch/arm/mach-kirkwood/pcie.c b/trunk/arch/arm/mach-kirkwood/pcie.c
index 513ad3102d7c..55e7f00836b7 100644
--- a/trunk/arch/arm/mach-kirkwood/pcie.c
+++ b/trunk/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 0 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 1 I/O Space";
- pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
+ pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
diff --git a/trunk/arch/arm/mach-mmp/include/mach/system.h b/trunk/arch/arm/mach-mmp/include/mach/system.h
index 1a8a25edb1b4..4f5b0e0ce6cf 100644
--- a/trunk/arch/arm/mach-mmp/include/mach/system.h
+++ b/trunk/arch/arm/mach-mmp/include/mach/system.h
@@ -9,8 +9,6 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
-#include
-
static inline void arch_idle(void)
{
cpu_do_idle();
@@ -18,9 +16,6 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
- if (cpu_is_pxa168())
- cpu_reset(0xffff0000);
- else
- cpu_reset(0);
+ cpu_reset(0);
}
#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 58093d9e07be..50d5939a78f1 100644
--- a/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/trunk/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,7 +312,8 @@ static int pxa_set_target(struct cpufreq_policy *policy,
freqs.cpu = policy->cpu;
if (freq_debug)
- pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
+ pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
+ "(SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
diff --git a/trunk/arch/arm/mach-pxa/include/mach/hardware.h b/trunk/arch/arm/mach-pxa/include/mach/hardware.h
index 814f1458a06a..7f64d24cd564 100644
--- a/trunk/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/trunk/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,35 +264,23 @@
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
* == 0x3 for pxa300/pxa310/pxa320
*/
-#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
#define __cpu_is_pxa2xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id <= 0x2; \
})
-#else
-#define __cpu_is_pxa2xx(id) (0)
-#endif
-#ifdef CONFIG_PXA3xx
#define __cpu_is_pxa3xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id == 0x3; \
})
-#else
-#define __cpu_is_pxa3xx(id) (0)
-#endif
-#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
#define __cpu_is_pxa93x(id) \
({ \
unsigned int _id = (id) >> 4 & 0xfff; \
_id == 0x683 || _id == 0x693; \
})
-#else
-#define __cpu_is_pxa93x(id) (0)
-#endif
#define cpu_is_pxa2xx() \
({ \
@@ -321,7 +309,7 @@ extern unsigned long get_clock_tick_rate(void);
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
#define pcibios_assign_all_busses() 1
-#define ARCH_HAS_DMA_SET_COHERENT_MASK
#endif
+
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/trunk/arch/arm/mach-pxa/include/mach/io.h b/trunk/arch/arm/mach-pxa/include/mach/io.h
index fdca3be47d9b..262691fb97d8 100644
--- a/trunk/arch/arm/mach-pxa/include/mach/io.h
+++ b/trunk/arch/arm/mach-pxa/include/mach/io.h
@@ -6,8 +6,6 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
-#include
-
#define IO_SPACE_LIMIT 0xffffffff
/*
diff --git a/trunk/arch/arm/mach-pxa/palm27x.c b/trunk/arch/arm/mach-pxa/palm27x.c
index 405b92a29793..77ad6d34ab5b 100644
--- a/trunk/arch/arm/mach-pxa/palm27x.c
+++ b/trunk/arch/arm/mach-pxa/palm27x.c
@@ -469,13 +469,9 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
},
};
-static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
- .use_pio = 1,
-};
-
void __init palm27x_pmic_init(void)
{
i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
- pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
+ pxa27x_set_i2c_power_info(NULL);
}
#endif
diff --git a/trunk/arch/arm/mach-pxa/vpac270.c b/trunk/arch/arm/mach-pxa/vpac270.c
index 37d6173bbb66..c9b747cedea8 100644
--- a/trunk/arch/arm/mach-pxa/vpac270.c
+++ b/trunk/arch/arm/mach-pxa/vpac270.c
@@ -240,7 +240,6 @@ static void __init vpac270_onenand_init(void) {}
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data vpac270_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .gpio_power = -1,
.gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
.gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
.detect_delay_ms = 200,
diff --git a/trunk/arch/arm/mach-s3c64xx/dev-spi.c b/trunk/arch/arm/mach-s3c64xx/dev-spi.c
index 405e62128917..a492b982aa06 100644
--- a/trunk/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/trunk/arch/arm/mach-s3c64xx/dev-spi.c
@@ -18,11 +18,10 @@
#include
#include
#include
-#include
#include
#include
-#include
+#include
static char *spi_src_clks[] = {
[S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
diff --git a/trunk/arch/arm/mach-s3c64xx/mach-real6410.c b/trunk/arch/arm/mach-s3c64xx/mach-real6410.c
index e130379ba0e8..5c07d013b23d 100644
--- a/trunk/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/trunk/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,73 +30,73 @@
#include
#include
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
-#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
-#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
+#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
+#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
[0] = {
- .hwport = 0,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[1] = {
- .hwport = 1,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[2] = {
- .hwport = 2,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
[3] = {
- .hwport = 3,
- .flags = 0,
- .ucon = UCON,
- .ulcon = ULCON,
- .ufcon = UFCON,
+ .hwport = 3,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
},
};
/* DM9000AEP 10/100 ethernet controller */
static struct resource real6410_dm9k_resource[] = {
- [0] = {
- .start = S3C64XX_PA_XM0CSN1,
- .end = S3C64XX_PA_XM0CSN1 + 1,
- .flags = IORESOURCE_MEM
- },
- [1] = {
- .start = S3C64XX_PA_XM0CSN1 + 4,
- .end = S3C64XX_PA_XM0CSN1 + 5,
- .flags = IORESOURCE_MEM
- },
- [2] = {
- .start = S3C_EINT(7),
- .end = S3C_EINT(7),
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
- }
+ [0] = {
+ .start = S3C64XX_PA_XM0CSN1,
+ .end = S3C64XX_PA_XM0CSN1 + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = S3C64XX_PA_XM0CSN1 + 4,
+ .end = S3C64XX_PA_XM0CSN1 + 5,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = S3C_EINT(7),
+ .end = S3C_EINT(7),
+ .flags = IORESOURCE_IRQ,
+ }
};
static struct dm9000_plat_data real6410_dm9k_pdata = {
- .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};
static struct platform_device real6410_device_eth = {
- .name = "dm9000",
- .id = -1,
- .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
- .resource = real6410_dm9k_resource,
- .dev = {
- .platform_data = &real6410_dm9k_pdata,
- },
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
+ .resource = real6410_dm9k_resource,
+ .dev = {
+ .platform_data = &real6410_dm9k_pdata,
+ },
};
static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
/* set timing for nCS1 suitable for ethernet chip */
__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
- (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
- (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
- (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
- (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
- (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
+ (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
+ (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
+ (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
+ (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
+ (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
}
diff --git a/trunk/arch/arm/mach-s5p6440/cpu.c b/trunk/arch/arm/mach-s5p6440/cpu.c
index ec592e866054..526f33adb31d 100644
--- a/trunk/arch/arm/mach-s5p6440/cpu.c
+++ b/trunk/arch/arm/mach-s5p6440/cpu.c
@@ -19,7 +19,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/arm/mach-s5p6442/cpu.c b/trunk/arch/arm/mach-s5p6442/cpu.c
index 70ac681af72b..a48fb553fd01 100644
--- a/trunk/arch/arm/mach-s5p6442/cpu.c
+++ b/trunk/arch/arm/mach-s5p6442/cpu.c
@@ -19,7 +19,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/arm/mach-s5pc100/cpu.c b/trunk/arch/arm/mach-s5pc100/cpu.c
index cd1afbce83e2..251c92ac5b22 100644
--- a/trunk/arch/arm/mach-s5pc100/cpu.c
+++ b/trunk/arch/arm/mach-s5pc100/cpu.c
@@ -21,7 +21,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/arm/mach-s5pv210/clock.c b/trunk/arch/arm/mach-s5pv210/clock.c
index d562670e1b0b..af91fefef2c6 100644
--- a/trunk/arch/arm/mach-s5pv210/clock.c
+++ b/trunk/arch/arm/mach-s5pv210/clock.c
@@ -173,6 +173,11 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
}
+static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
+}
+
static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
@@ -275,24 +280,6 @@ static struct clk init_clocks_disable[] = {
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
- }, {
- .name = "fimc",
- .id = 0,
- .parent = &clk_hclk_dsys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 24),
- }, {
- .name = "fimc",
- .id = 1,
- .parent = &clk_hclk_dsys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 25),
- }, {
- .name = "fimc",
- .id = 2,
- .parent = &clk_hclk_dsys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 26),
}, {
.name = "otg",
.id = -1,
@@ -370,7 +357,7 @@ static struct clk init_clocks_disable[] = {
.id = 1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 10),
+ .ctrlbit = (1<<8),
}, {
.name = "i2c",
.id = 2,
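Each clock entry above routes enable/disable through a per-register helper such as the new s5pv210_clk_ip4_ctrl(), with ctrlbit selecting the bit inside that gate register. A hedged sketch of what such a gate helper boils down to; the body is an assumption modeled on the pattern, not the platform's actual s5p_gatectrl():

    /* Read-modify-write one gate bit for a clock. */
    static int demo_gatectrl(void __iomem *reg, struct clk *clk, int enable)
    {
        u32 con = __raw_readl(reg);

        if (enable)
            con |= clk->ctrlbit;    /* ungate: clock runs */
        else
            con &= ~clk->ctrlbit;   /* gate: clock stops */

        __raw_writel(con, reg);
        return 0;
    }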
diff --git a/trunk/arch/arm/mach-s5pv210/cpu.c b/trunk/arch/arm/mach-s5pv210/cpu.c
index 245b82b53df4..b9f4d677cf55 100644
--- a/trunk/arch/arm/mach-s5pv210/cpu.c
+++ b/trunk/arch/arm/mach-s5pv210/cpu.c
@@ -19,7 +19,6 @@
#include
#include
#include
-#include
#include
#include
@@ -48,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
{
.virtual = (unsigned long)S5P_VA_SYSTIMER,
.pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
- .length = SZ_4K,
+ .length = SZ_1M,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC2,
diff --git a/trunk/arch/arm/mach-u300/include/mach/gpio.h b/trunk/arch/arm/mach-u300/include/mach/gpio.h
index d5a71abcbaea..7b1fc984abb6 100644
--- a/trunk/arch/arm/mach-u300/include/mach/gpio.h
+++ b/trunk/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,9 +273,6 @@ extern void gpio_pullup(unsigned gpio, int value);
extern int gpio_get_value(unsigned gpio);
extern void gpio_set_value(unsigned gpio, int value);
-#define gpio_get_value_cansleep gpio_get_value
-#define gpio_set_value_cansleep gpio_set_value
-
/* wrappers to sleep-enable the previous two functions */
static inline unsigned gpio_to_irq(unsigned gpio)
{
diff --git a/trunk/arch/arm/mach-vexpress/ct-ca9x4.c b/trunk/arch/arm/mach-vexpress/ct-ca9x4.c
index 71fb17349520..577df6cccb08 100644
--- a/trunk/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/trunk/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -68,7 +68,7 @@ static void __init ct_ca9x4_init_irq(void)
}
#if 0
-static void __init ct_ca9x4_timer_init(void)
+static void ct_ca9x4_timer_init(void)
{
writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
@@ -222,18 +222,12 @@ static struct platform_device pmu_device = {
.resource = pmu_resources,
};
-static void __init ct_ca9x4_init(void)
+static void ct_ca9x4_init(void)
{
int i;
#ifdef CONFIG_CACHE_L2X0
- void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
-
- /* set RAM latencies to 1 cycle for this core tile. */
- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
-
- l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
+ l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/trunk/arch/arm/mach-vexpress/v2m.c b/trunk/arch/arm/mach-vexpress/v2m.c
index 7eaa232180a5..817f0ad38a0b 100644
--- a/trunk/arch/arm/mach-vexpress/v2m.c
+++ b/trunk/arch/arm/mach-vexpress/v2m.c
@@ -48,7 +48,7 @@ void __init v2m_map_io(struct map_desc *tile, size_t num)
}
-static void __init v2m_timer_init(void)
+static void v2m_timer_init(void)
{
writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
diff --git a/trunk/arch/arm/mm/alignment.c b/trunk/arch/arm/mm/alignment.c
index 724ba3bce72c..d073b64ae87e 100644
--- a/trunk/arch/arm/mm/alignment.c
+++ b/trunk/arch/arm/mm/alignment.c
@@ -885,23 +885,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
- else {
- /*
- * We're about to disable the alignment trap and return to
- * user space. But if an interrupt occurs before actually
- * reaching user space, then the IRQ vector entry code will
- * notice that we were still in kernel space and therefore
- * the alignment trap won't be re-enabled in that case as it
- * is presumed to be always on from kernel space.
- * Let's prevent that race by disabling interrupts here (they
- * are disabled on the way back to user space anyway in
- * entry-common.S) and disable the alignment trap only if
- * there is no work pending for this thread.
- */
- raw_local_irq_disable();
- if (!(current_thread_info()->flags & _TIF_WORK_MASK))
- set_cr(cr_no_alignment);
- }
+ else
+ set_cr(cr_no_alignment);
return 0;
}
diff --git a/trunk/arch/arm/mm/ioremap.c b/trunk/arch/arm/mm/ioremap.c
index 17e7b0b57e49..ab506272b2d3 100644
--- a/trunk/arch/arm/mm/ioremap.c
+++ b/trunk/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (pfn_valid(pfn)) {
- printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
- KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
- KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
- WARN_ON(1);
- }
+ if (WARN_ON(pfn_valid(pfn)))
+ return NULL;
type = get_mem_type(mtype);
if (!type)
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index e8ed9dc461fe..6e1c4f6a2b3f 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -15,7 +15,6 @@
#include
#include
#include
-#include
#include
#include
@@ -247,9 +246,6 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_WRITE | L_PTE_EXEC,
- .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -258,9 +254,6 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
- .prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -418,12 +411,9 @@ static void __init build_mem_type_table(void)
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent() && cpu_is_xsc3()) {
+ if (arch_is_coherent() && cpu_is_xsc3())
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
- }
+
/*
* ARMv6 and above have extended page tables.
*/
@@ -448,9 +438,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
#endif
}
@@ -487,8 +475,6 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -512,19 +498,6 @@ static void __init build_mem_type_table(void)
}
}
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot)
-{
- if (!pfn_valid(pfn))
- return pgprot_noncached(vma_prot);
- else if (file->f_flags & O_SYNC)
- return pgprot_writecombine(vma_prot);
- return vma_prot;
-}
-EXPORT_SYMBOL(phys_mem_access_prot);
-#endif
-
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S
index 197f21bed5e9..6a8506d99ee9 100644
--- a/trunk/arch/arm/mm/proc-v7.S
+++ b/trunk/arch/arm/mm/proc-v7.S
@@ -186,14 +186,13 @@ cpu_v7_name:
* It is assumed that:
* - cache type register is implemented
*/
-__v7_ca9mp_setup:
+__v7_setup:
#ifdef CONFIG_SMP
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
-__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
@@ -202,16 +201,11 @@ __v7_setup:
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
- bne 3f
+ bne 2f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
- orr r6, r6, r5, lsr #20-4 @ combine variant and revision
- ubfx r0, r0, #4, #12 @ primary part number
+ orr r0, r6, r5, lsr #20-4 @ combine variant and revision
- /* Cortex-A8 Errata */
- ldr r10, =0x00000c08 @ Cortex-A8 primary part number
- teq r0, r10
- bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -219,50 +213,21 @@ __v7_setup:
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
- teq r6, #0x20 @ only present in r2p0
+ teq r0, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
- teq r6, #0x20 @ only present in r2p0
+ teq r0, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
- b 3f
- /* Cortex-A9 Errata */
-2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
- teq r0, r10
- bne 3f
-#ifdef CONFIG_ARM_ERRATA_742230
- cmp r6, #0x22 @ only present up to r2p2
- mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orrle r10, r10, #1 << 4 @ set bit #4
- mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_742231
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
- mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orreq r10, r10, #1 << 12 @ set bit #12
- orreq r10, r10, #1 << 22 @ set bit #22
- mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_743622
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
- mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orreq r10, r10, #1 << 6 @ set bit #6
- mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-
-3: mov r10, #0
+2: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
@@ -358,29 +323,6 @@ cpu_elf_name:
.section ".proc.info.init", #alloc, #execinstr
- .type __v7_ca9mp_proc_info, #object
-__v7_ca9mp_proc_info:
- .long 0x410fc090 @ Required ID value
- .long 0xff0ffff0 @ Mask for ID
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \
- PMD_FLAGS
- .long PMD_TYPE_SECT | \
- PMD_SECT_XN | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __v7_ca9mp_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
- .long cpu_v7_name
- .long v7_processor_functions
- .long v7wbi_tlb_fns
- .long v6_user_fns
- .long v7_cache_fns
- .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
-
/*
* Match any ARMv7 processor core.
*/
diff --git a/trunk/arch/arm/oprofile/Makefile b/trunk/arch/arm/oprofile/Makefile
index b2215c61cdf0..e666eafed152 100644
--- a/trunk/arch/arm/oprofile/Makefile
+++ b/trunk/arch/arm/oprofile/Makefile
@@ -6,8 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/trunk/arch/arm/oprofile/common.c b/trunk/arch/arm/oprofile/common.c
index 8aa974491dfc..0691176899ff 100644
--- a/trunk/arch/arm/oprofile/common.c
+++ b/trunk/arch/arm/oprofile/common.c
@@ -25,10 +25,138 @@
#include
#ifdef CONFIG_HW_PERF_EVENTS
-char *op_name_from_perf_id(void)
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+ unsigned long count;
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long unit_mask;
+ unsigned long kernel;
+ unsigned long user;
+ struct perf_event_attr attr;
+};
+
+static int op_arm_enabled;
+static DEFINE_MUTEX(op_arm_mutex);
+
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int perf_num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ int id;
+ u32 cpu = smp_processor_id();
+
+ for (id = 0; id < perf_num_counters; ++id)
+ if (perf_events[cpu][id] == event)
+ break;
+
+ if (id != perf_num_counters)
+ oprofile_add_sample(regs, id);
+ else
+ pr_warning("oprofile: ignoring spurious overflow "
+ "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by op_arm_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+ int i;
+ u32 size = sizeof(struct perf_event_attr);
+ struct perf_event_attr *attr;
+
+ for (i = 0; i < perf_num_counters; ++i) {
+ attr = &counter_config[i].attr;
+ memset(attr, 0, size);
+ attr->type = PERF_TYPE_RAW;
+ attr->size = size;
+ attr->config = counter_config[i].event;
+ attr->sample_period = counter_config[i].count;
+ attr->pinned = 1;
+ }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+ int ret = 0;
+ struct perf_event *pevent;
+
+ if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
+ return ret;
+
+ pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+ cpu, -1,
+ op_overflow_handler);
+
+ if (IS_ERR(pevent)) {
+ ret = PTR_ERR(pevent);
+ } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+ pr_warning("oprofile: failed to enable event %d "
+ "on CPU %d\n", event, cpu);
+ ret = -EBUSY;
+ } else {
+ perf_events[cpu][event] = pevent;
+ }
+
+ return ret;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+ struct perf_event *pevent = perf_events[cpu][event];
+
+ if (pevent) {
+ perf_event_release_kernel(pevent);
+ perf_events[cpu][event] = NULL;
+ }
+}
+
+/*
+ * Called by op_arm_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+ int cpu, event, ret = 0;
+
+ for_each_online_cpu(cpu) {
+ for (event = 0; event < perf_num_counters; ++event) {
+ ret = op_create_counter(cpu, event);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Called by op_arm_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
{
- enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
+ int cpu, event;
+ for_each_online_cpu(cpu)
+ for (event = 0; event < perf_num_counters; ++event)
+ op_destroy_counter(cpu, event);
+}
+
+
+static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
+{
switch (id) {
case ARM_PERF_PMU_ID_XSCALE1:
return "arm/xscale1";
@@ -47,6 +175,116 @@ char *op_name_from_perf_id(void)
}
}
+static int op_arm_create_files(struct super_block *sb, struct dentry *root)
+{
+ unsigned int i;
+
+ for (i = 0; i < perf_num_counters; i++) {
+ struct dentry *dir;
+ char buf[4];
+
+ snprintf(buf, sizeof buf, "%d", i);
+ dir = oprofilefs_mkdir(sb, root, buf);
+ oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+ }
+
+ return 0;
+}
+
+static int op_arm_setup(void)
+{
+ spin_lock(&oprofilefs_lock);
+ op_perf_setup();
+ spin_unlock(&oprofilefs_lock);
+ return 0;
+}
+
+static int op_arm_start(void)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&op_arm_mutex);
+ if (!op_arm_enabled) {
+ ret = 0;
+ op_perf_start();
+ op_arm_enabled = 1;
+ }
+ mutex_unlock(&op_arm_mutex);
+ return ret;
+}
+
+static void op_arm_stop(void)
+{
+ mutex_lock(&op_arm_mutex);
+ if (op_arm_enabled)
+ op_perf_stop();
+ op_arm_enabled = 0;
+ mutex_unlock(&op_arm_mutex);
+}
+
+#ifdef CONFIG_PM
+static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
+{
+ mutex_lock(&op_arm_mutex);
+ if (op_arm_enabled)
+ op_perf_stop();
+ mutex_unlock(&op_arm_mutex);
+ return 0;
+}
+
+static int op_arm_resume(struct platform_device *dev)
+{
+ mutex_lock(&op_arm_mutex);
+ if (op_arm_enabled && op_perf_start())
+ op_arm_enabled = 0;
+ mutex_unlock(&op_arm_mutex);
+ return 0;
+}
+
+static struct platform_driver oprofile_driver = {
+ .driver = {
+ .name = "arm-oprofile",
+ },
+ .resume = op_arm_resume,
+ .suspend = op_arm_suspend,
+};
+
+static struct platform_device *oprofile_pdev;
+
+static int __init init_driverfs(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&oprofile_driver);
+ if (ret)
+ goto out;
+
+ oprofile_pdev = platform_device_register_simple(
+ oprofile_driver.driver.name, 0, NULL, 0);
+ if (IS_ERR(oprofile_pdev)) {
+ ret = PTR_ERR(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+ }
+
+out:
+ return ret;
+}
+
+static void exit_driverfs(void)
+{
+ platform_device_unregister(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+}
+#else
+static int __init init_driverfs(void) { return 0; }
+#define exit_driverfs() do { } while (0)
+#endif /* CONFIG_PM */
+
static int report_trace(struct stackframe *frame, void *d)
{
unsigned int *depth = d;
@@ -111,14 +349,72 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
+ int cpu, ret = 0;
+
+ perf_num_counters = armpmu_get_max_events();
+
+ counter_config = kcalloc(perf_num_counters,
+ sizeof(struct op_counter_config), GFP_KERNEL);
+
+ if (!counter_config) {
+ pr_info("oprofile: failed to allocate %d "
+ "counters\n", perf_num_counters);
+ return -ENOMEM;
+ }
+
+ ret = init_driverfs();
+ if (ret) {
+ kfree(counter_config);
+ return ret;
+ }
+
+ for_each_possible_cpu(cpu) {
+ perf_events[cpu] = kcalloc(perf_num_counters,
+ sizeof(struct perf_event *), GFP_KERNEL);
+ if (!perf_events[cpu]) {
+ pr_info("oprofile: failed to allocate %d perf events "
+ "for cpu %d\n", perf_num_counters, cpu);
+ while (--cpu >= 0)
+ kfree(perf_events[cpu]);
+ return -ENOMEM;
+ }
+ }
+
ops->backtrace = arm_backtrace;
+ ops->create_files = op_arm_create_files;
+ ops->setup = op_arm_setup;
+ ops->start = op_arm_start;
+ ops->stop = op_arm_stop;
+ ops->shutdown = op_arm_stop;
+ ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
+
+ if (!ops->cpu_type)
+ ret = -ENODEV;
+ else
+ pr_info("oprofile: using %s\n", ops->cpu_type);
- return oprofile_perf_init(ops);
+ return ret;
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
- oprofile_perf_exit();
+ int cpu, id;
+ struct perf_event *event;
+
+ if (*perf_events) {
+ exit_driverfs();
+ for_each_possible_cpu(cpu) {
+ for (id = 0; id < perf_num_counters; ++id) {
+ event = perf_events[cpu][id];
+ if (event != NULL)
+ perf_event_release_kernel(event);
+ }
+ kfree(perf_events[cpu]);
+ }
+ }
+
+ if (counter_config)
+ kfree(counter_config);
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)
@@ -126,5 +422,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
pr_info("oprofile: hardware counters not available\n");
return -ENODEV;
}
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
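op_create_counter() above pins one kernel counter per CPU through perf_event_create_kernel_counter(), which in this kernel generation takes (attr, cpu, pid, overflow_handler). A hedged sketch of creating and releasing a single such counter; the raw event number and period are placeholders:

    static struct perf_event *demo_event;

    static int demo_start(int cpu)
    {
        static struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type          = PERF_TYPE_RAW;
        attr.size          = sizeof(attr);
        attr.config        = 0x11;      /* assumed raw event number */
        attr.sample_period = 100000;    /* assumed sample period */
        attr.pinned        = 1;         /* keep it scheduled on the PMU */

        demo_event = perf_event_create_kernel_counter(&attr, cpu, -1,
                                                      op_overflow_handler);
        if (IS_ERR(demo_event))
            return PTR_ERR(demo_event);
        return 0;
    }

    static void demo_stop(void)
    {
        perf_event_release_kernel(demo_event);
    }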
diff --git a/trunk/arch/arm/plat-nomadik/timer.c b/trunk/arch/arm/plat-nomadik/timer.c
index aedf9c1d645e..ea3ca86c5283 100644
--- a/trunk/arch/arm/plat-nomadik/timer.c
+++ b/trunk/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/plat-nomadik/timer.c
+ * linux/arch/arm/mach-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
cr = readl(mtu_base + MTU_CR(1));
writel(0, mtu_base + MTU_LR(1));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
- writel(1 << 1, mtu_base + MTU_IMSC);
+ writel(0x2, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@@ -131,23 +131,25 @@ void __init nmdk_timer_init(void)
{
unsigned long rate;
struct clk *clk0;
- u32 cr = MTU_CRn_32BITS;
+ struct clk *clk1;
+ u32 cr;
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
+ clk1 = clk_get_sys("mtu1", NULL);
+ BUG_ON(IS_ERR(clk1));
+
clk_enable(clk0);
+ clk_enable(clk1);
/*
- * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
- * for ux500.
- * Use a divide-by-16 counter if the tick rate is more than 32MHz.
- * At 32 MHz, the timer (with 32 bit counter) can be programmed
- * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
- * with 16 gives too low timer resolution.
+ * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
+ * use a divide-by-16 counter if it's more than 16MHz
*/
+	cr = MTU_CRn_32BITS;
rate = clk_get_rate(clk0);
- if (rate > 32000000) {
+ if (rate > 16 << 20) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
@@ -168,8 +170,15 @@ void __init nmdk_timer_init(void)
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
- /* Timer 1 is used for events */
-
+ /* Timer 1 is used for events, fix according to rate */
+ cr = MTU_CRn_32BITS;
+ rate = clk_get_rate(clk1);
+ if (rate > 16 << 20) {
+ rate /= 16;
+ cr |= MTU_CRn_PRESCALE_16;
+ } else {
+ cr |= MTU_CRn_PRESCALE_1;
+ }
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
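The prescale choice above compares the MTU clock rate against 16 << 20 (16 * 2^20, about 16.8 MHz) and divides by 16 when it is faster, so the 32-bit counter covers a longer span: a 110 MHz ux500 clock becomes a 6.875 MHz tick, while Nomadik's 2.4 MHz clock runs undivided. A sketch of that decision factored into a helper (demo name; the MTU_CRn_* flags are the file's own):

    static unsigned long demo_pick_prescale(unsigned long rate, u32 *cr)
    {
        if (rate > (16 << 20)) {        /* faster than ~16.8 MHz */
            rate /= 16;                 /* e.g. 110 MHz -> 6.875 MHz */
            *cr |= MTU_CRn_PRESCALE_16;
        } else {
            *cr |= MTU_CRn_PRESCALE_1;
        }
        return rate;
    }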
diff --git a/trunk/arch/arm/plat-omap/Kconfig b/trunk/arch/arm/plat-omap/Kconfig
index a92cb499313f..e39a417a368d 100644
--- a/trunk/arch/arm/plat-omap/Kconfig
+++ b/trunk/arch/arm/plat-omap/Kconfig
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES
config OMAP_DEBUG_LEDS
bool
depends on OMAP_DEBUG_DEVICES
- default y if LEDS_CLASS
+ default y if LEDS
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
diff --git a/trunk/arch/arm/plat-omap/iommu.c b/trunk/arch/arm/plat-omap/iommu.c
index 6cd151b31bc5..a202a2ce6e3d 100644
--- a/trunk/arch/arm/plat-omap/iommu.c
+++ b/trunk/arch/arm/plat-omap/iommu.c
@@ -320,7 +320,6 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
if ((start <= da) && (da < start + bytes)) {
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
__func__, start, da, bytes);
- iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
diff --git a/trunk/arch/arm/plat-omap/mcbsp.c b/trunk/arch/arm/plat-omap/mcbsp.c
index 0c8612fd8312..e31496e35b0f 100644
--- a/trunk/arch/arm/plat-omap/mcbsp.c
+++ b/trunk/arch/arm/plat-omap/mcbsp.c
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
} else {
- complete(&mcbsp_rx->rx_irq_completion);
+ complete(&mcbsp_rx->tx_irq_completion);
}
return IRQ_HANDLED;
diff --git a/trunk/arch/arm/plat-omap/sram.c b/trunk/arch/arm/plat-omap/sram.c
index 10b3b4c63372..226b2e858d6c 100644
--- a/trunk/arch/arm/plat-omap/sram.c
+++ b/trunk/arch/arm/plat-omap/sram.c
@@ -220,7 +220,20 @@ void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
+ if (cpu_is_omap24xx()) {
+ omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
+
+ base = OMAP2_SRAM_PA;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ }
+
if (cpu_is_omap34xx()) {
+ omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
+ base = OMAP3_SRAM_PA;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+
/*
* SRAM must be marked as non-cached on OMAP3 since the
* CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -231,11 +244,13 @@ void __init omap_map_sram(void)
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
}
- omap_sram_io_desc[0].virtual = omap_sram_base;
- base = omap_sram_start;
- base = ROUND_DOWN(base, PAGE_SIZE);
- omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
- omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
+ if (cpu_is_omap44xx()) {
+ omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
+ base = OMAP4_SRAM_PA;
+ base = ROUND_DOWN(base, PAGE_SIZE);
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
+ }
+ omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/trunk/arch/arm/plat-s5p/dev-fimc0.c b/trunk/arch/arm/plat-s5p/dev-fimc0.c
index 608770fc1531..d3f1a9b5d2b5 100644
--- a/trunk/arch/arm/plat-s5p/dev-fimc0.c
+++ b/trunk/arch/arm/plat-s5p/dev-fimc0.c
@@ -10,7 +10,6 @@
*/
#include
-#include
#include
#include
#include
@@ -19,7 +18,7 @@
static struct resource s5p_fimc0_resource[] = {
[0] = {
.start = S5P_PA_FIMC0,
- .end = S5P_PA_FIMC0 + SZ_4K - 1,
+ .end = S5P_PA_FIMC0 + SZ_1M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -29,15 +28,9 @@ static struct resource s5p_fimc0_resource[] = {
},
};
-static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
-
struct platform_device s5p_device_fimc0 = {
.name = "s5p-fimc",
.id = 0,
.num_resources = ARRAY_SIZE(s5p_fimc0_resource),
.resource = s5p_fimc0_resource,
- .dev = {
- .dma_mask = &s5p_fimc0_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
};
diff --git a/trunk/arch/arm/plat-s5p/dev-fimc1.c b/trunk/arch/arm/plat-s5p/dev-fimc1.c
index 76e3a97a87d3..41bd6986d0ad 100644
--- a/trunk/arch/arm/plat-s5p/dev-fimc1.c
+++ b/trunk/arch/arm/plat-s5p/dev-fimc1.c
@@ -10,7 +10,6 @@
*/
#include
-#include
#include
#include
#include
@@ -19,7 +18,7 @@
static struct resource s5p_fimc1_resource[] = {
[0] = {
.start = S5P_PA_FIMC1,
- .end = S5P_PA_FIMC1 + SZ_4K - 1,
+ .end = S5P_PA_FIMC1 + SZ_1M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -29,15 +28,9 @@ static struct resource s5p_fimc1_resource[] = {
},
};
-static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
-
struct platform_device s5p_device_fimc1 = {
.name = "s5p-fimc",
.id = 1,
.num_resources = ARRAY_SIZE(s5p_fimc1_resource),
.resource = s5p_fimc1_resource,
- .dev = {
- .dma_mask = &s5p_fimc1_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
};
diff --git a/trunk/arch/arm/plat-s5p/dev-fimc2.c b/trunk/arch/arm/plat-s5p/dev-fimc2.c
index 24d29816fa2c..dfddeda6d4a3 100644
--- a/trunk/arch/arm/plat-s5p/dev-fimc2.c
+++ b/trunk/arch/arm/plat-s5p/dev-fimc2.c
@@ -10,7 +10,6 @@
*/
#include
-#include
#include
#include
#include
@@ -19,7 +18,7 @@
static struct resource s5p_fimc2_resource[] = {
[0] = {
.start = S5P_PA_FIMC2,
- .end = S5P_PA_FIMC2 + SZ_4K - 1,
+ .end = S5P_PA_FIMC2 + SZ_1M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -29,15 +28,9 @@ static struct resource s5p_fimc2_resource[] = {
},
};
-static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
-
struct platform_device s5p_device_fimc2 = {
.name = "s5p-fimc",
.id = 2,
.num_resources = ARRAY_SIZE(s5p_fimc2_resource),
.resource = s5p_fimc2_resource,
- .dev = {
- .dma_mask = &s5p_fimc2_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
};
diff --git a/trunk/arch/arm/plat-samsung/adc.c b/trunk/arch/arm/plat-samsung/adc.c
index e8f2be2d67f2..04d9521ddc9f 100644
--- a/trunk/arch/arm/plat-samsung/adc.c
+++ b/trunk/arch/arm/plat-samsung/adc.c
@@ -435,6 +435,7 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
static int s3c_adc_resume(struct platform_device *pdev)
{
struct adc_device *adc = platform_get_drvdata(pdev);
+ unsigned long flags;
clk_enable(adc->clk);
enable_irq(adc->irq);
diff --git a/trunk/arch/arm/plat-samsung/clock.c b/trunk/arch/arm/plat-samsung/clock.c
index e8d20b0bc50e..90a20512d68d 100644
--- a/trunk/arch/arm/plat-samsung/clock.c
+++ b/trunk/arch/arm/plat-samsung/clock.c
@@ -48,9 +48,6 @@
#include
#include
-#include
-#include /* for s3c24xx_uart_devs */
-
/* clock information */
static LIST_HEAD(clocks);
@@ -68,28 +65,6 @@ static int clk_null_enable(struct clk *clk, int enable)
return 0;
}
-static int dev_is_s3c_uart(struct device *dev)
-{
- struct platform_device **pdev = s3c24xx_uart_devs;
- int i;
- for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
- if (*pdev && dev == &(*pdev)->dev)
- return 1;
- return 0;
-}
-
-/*
- * Serial drivers call get_clock() very early, before platform bus
- * has been set up, this requires a special check to let them get
- * a proper clock
- */
-
-static int dev_is_platform_device(struct device *dev)
-{
- return dev->bus == &platform_bus_type ||
- (dev->bus == NULL && dev_is_s3c_uart(dev));
-}
-
/* Clock API calls */
struct clk *clk_get(struct device *dev, const char *id)
@@ -98,7 +73,7 @@ struct clk *clk_get(struct device *dev, const char *id)
struct clk *clk = ERR_PTR(-ENOENT);
int idno;
- if (dev == NULL || !dev_is_platform_device(dev))
+ if (dev == NULL || dev->bus != &platform_bus_type)
idno = -1;
else
idno = to_platform_device(dev)->id;
diff --git a/trunk/arch/arm/plat-samsung/gpio-config.c b/trunk/arch/arm/plat-samsung/gpio-config.c
index e3d41eaed1ff..57b68a50f45e 100644
--- a/trunk/arch/arm/plat-samsung/gpio-config.c
+++ b/trunk/arch/arm/plat-samsung/gpio-config.c
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
if (!chip)
return -EINVAL;
- off = pin - chip->chip.base;
+ off = chip->chip.base - pin;
shift = off * 2;
reg = chip->base + 0x0C;
drvstr = __raw_readl(reg);
+ drvstr = 0xffff & (0x3 << shift);
drvstr = drvstr >> shift;
- drvstr &= 0x3;
return (__force s5p_gpio_drvstr_t)drvstr;
}
@@ -296,12 +296,11 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
if (!chip)
return -EINVAL;
- off = pin - chip->chip.base;
+ off = chip->chip.base - pin;
shift = off * 2;
reg = chip->base + 0x0C;
tmp = __raw_readl(reg);
- tmp &= ~(0x3 << shift);
tmp |= drvstr << shift;
__raw_writel(tmp, reg);
diff --git a/trunk/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/trunk/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 1c6b92947c5d..db4112c6f2be 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
/* Define values for the drvstr available for each gpio pin.
*
* These values control the value of the output signal driver strength,
- * configurable on most pins on the S5P series.
+ * configurable on most pins on the S5C series.
*/
-#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0)
-#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2)
-#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1)
-#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3)
+#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00)
+#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01)
+#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10)
+#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11)
/**
* s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin
diff --git a/trunk/arch/avr32/kernel/module.c b/trunk/arch/avr32/kernel/module.c
index a727f54d64d6..98f94d041d9c 100644
--- a/trunk/arch/avr32/kernel/module.c
+++ b/trunk/arch/avr32/kernel/module.c
@@ -314,9 +314,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
- return 0;
+ return module_bug_finalize(hdr, sechdrs, module);
}
void module_arch_cleanup(struct module *module)
{
+ module_bug_cleanup(module);
}
diff --git a/trunk/arch/frv/Kconfig b/trunk/arch/frv/Kconfig
index 0f2417df6323..16399bd24993 100644
--- a/trunk/arch/frv/Kconfig
+++ b/trunk/arch/frv/Kconfig
@@ -7,7 +7,6 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
config ZONE_DMA
diff --git a/trunk/arch/frv/kernel/signal.c b/trunk/arch/frv/kernel/signal.c
index bab01298b58e..0974c0ecc594 100644
--- a/trunk/arch/frv/kernel/signal.c
+++ b/trunk/arch/frv/kernel/signal.c
@@ -121,9 +121,6 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
struct user_context *user = current->thread.user;
unsigned long tbr, psr;
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
tbr = user->i.tbr;
psr = user->i.psr;
if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -253,8 +250,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
struct sigframe __user *frame;
int rsig;
- set_fs(USER_DS);
-
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -298,23 +293,22 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
(unsigned long) (frame->retcode + 2));
}
- /* Set up registers for the signal handler */
+ /* set up registers for signal handler */
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
+
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- struct fdpic_func_descriptor desc;
- if (copy_from_user(&desc, funcptr, sizeof(desc)))
- goto give_sigsegv;
- __frame->pc = desc.text;
- __frame->gr15 = desc.GOT;
+ __get_user(__frame->pc, &funcptr->text);
+ __get_user(__frame->gr15, &funcptr->GOT);
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
+ set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -329,7 +323,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
return 0;
give_sigsegv:
- force_sigsegv(sig, current);
+ force_sig(SIGSEGV, current);
return -EFAULT;
} /* end setup_frame() */
@@ -344,8 +338,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
int rsig;
- set_fs(USER_DS);
-
frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -400,23 +392,22 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
}
/* Set up registers for signal handler */
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
+ __frame->gr9 = (unsigned long) &frame->info;
+
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- struct fdpic_func_descriptor desc;
- if (copy_from_user(&desc, funcptr, sizeof(desc)))
- goto give_sigsegv;
- __frame->pc = desc.text;
- __frame->gr15 = desc.GOT;
+ __get_user(__frame->pc, &funcptr->text);
+ __get_user(__frame->gr15, &funcptr->GOT);
} else {
__frame->pc = (unsigned long) ka->sa.sa_handler;
__frame->gr15 = 0;
}
- __frame->sp = (unsigned long) frame;
- __frame->lr = (unsigned long) &frame->retcode;
- __frame->gr8 = sig;
- __frame->gr9 = (unsigned long) &frame->info;
+ set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
@@ -431,7 +422,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return 0;
give_sigsegv:
- force_sigsegv(sig, current);
+ force_sig(SIGSEGV, current);
return -EFAULT;
} /* end setup_rt_frame() */
@@ -446,7 +437,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
int ret;
/* Are we from a system call? */
- if (__frame->syscallno != -1) {
+ if (in_syscall(__frame)) {
/* If so, check system call restarting.. */
switch (__frame->gr8) {
case -ERESTART_RESTARTBLOCK:
@@ -465,7 +456,6 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
__frame->gr8 = __frame->orig_gr8;
__frame->pc -= 4;
}
- __frame->syscallno = -1;
}
/* Set up the stack frame */
@@ -548,11 +538,10 @@ static void do_signal(void)
break;
case -ERESTART_RESTARTBLOCK:
- __frame->gr7 = __NR_restart_syscall;
+ __frame->gr8 = __NR_restart_syscall;
__frame->pc -= 4;
break;
}
- __frame->syscallno = -1;
}
/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/trunk/arch/frv/lib/Makefile b/trunk/arch/frv/lib/Makefile
index 4ff2fb1e6b16..f4709756d0d9 100644
--- a/trunk/arch/frv/lib/Makefile
+++ b/trunk/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
diff --git a/trunk/arch/mn10300/mm/cache-disabled.c b/trunk/arch/frv/lib/perf_event.c
similarity index 53%
rename from trunk/arch/mn10300/mm/cache-disabled.c
rename to trunk/arch/frv/lib/perf_event.c
index f669ea42aba6..9ac5acfd2e91 100644
--- a/trunk/arch/mn10300/mm/cache-disabled.c
+++ b/trunk/arch/frv/lib/perf_event.c
@@ -1,6 +1,6 @@
-/* Handle the cache being disabled
+/* Performance event handling
*
- * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -8,14 +8,12 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
-#include
+
+#include
/*
- * allow userspace to flush the instruction cache
+ * mark the performance event as pending
*/
-asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+void set_perf_event_pending(void)
{
- if (end < start)
- return -EINVAL;
- return 0;
}
diff --git a/trunk/arch/h8300/kernel/module.c b/trunk/arch/h8300/kernel/module.c
index db4953dc4e1b..0865e291c20d 100644
--- a/trunk/arch/h8300/kernel/module.c
+++ b/trunk/arch/h8300/kernel/module.c
@@ -112,9 +112,10 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- return 0;
+ return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
}
diff --git a/trunk/arch/ia64/include/asm/compat.h b/trunk/arch/ia64/include/asm/compat.h
index 9301a2821615..f90edc85b509 100644
--- a/trunk/arch/ia64/include/asm/compat.h
+++ b/trunk/arch/ia64/include/asm/compat.h
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
}
static __inline__ void __user *
-arch_compat_alloc_user_space (long len)
+compat_alloc_user_space (long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
diff --git a/trunk/arch/ia64/include/asm/hardirq.h b/trunk/arch/ia64/include/asm/hardirq.h
index 8fb7d33a661f..d514cd9edb49 100644
--- a/trunk/arch/ia64/include/asm/hardirq.h
+++ b/trunk/arch/ia64/include/asm/hardirq.h
@@ -6,6 +6,12 @@
* David Mosberger-Tang
*/
+
+#include
+#include
+
+#include
+
/*
* No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
*/
@@ -14,11 +20,6 @@
#define local_softirq_pending() (local_cpu_data->softirq_pending)
-#include
-#include
-
-#include
-
extern void __iomem *ipi_base_addr;
void ack_bad_irq(unsigned int irq);
diff --git a/trunk/arch/ia64/include/asm/system.h b/trunk/arch/ia64/include/asm/system.h
index dd028f2b13b3..9f342a574ce8 100644
--- a/trunk/arch/ia64/include/asm/system.h
+++ b/trunk/arch/ia64/include/asm/system.h
@@ -272,6 +272,10 @@ void cpu_idle_wait(void);
void default_idle(void);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/trunk/arch/ia64/kernel/fsys.S b/trunk/arch/ia64/kernel/fsys.S
index 331d42bda77a..3567d54f8cee 100644
--- a/trunk/arch/ia64/kernel/fsys.S
+++ b/trunk/arch/ia64/kernel/fsys.S
@@ -420,31 +420,22 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
;;
RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
+ mov ar.ccv=0
andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
- mov r8=EINVAL // default to EINVAL
#ifdef CONFIG_SMP
- // __ticket_spin_trylock(r31)
- ld4 r17=[r31]
- ;;
- mov.m ar.ccv=r17
- extr.u r9=r17,17,15
- adds r19=1,r17
- extr.u r18=r17,0,15
- ;;
- cmp.eq p6,p7=r9,r18
+ mov r17=1
;;
-(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
-(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
-(p7) br.cond.spnt.many .lock_contention
+ cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
+ mov r8=EINVAL // default to EINVAL
;;
- cmp4.eq p0,p7=r9,r17
- adds r31=2,r31
-(p7) br.cond.spnt.many .lock_contention
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+ cmp4.ne p6,p0=r18,r0
+(p6) br.cond.spnt.many .lock_contention
;;
#else
ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+ mov r8=EINVAL // default to EINVAL
#endif
add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -499,9 +490,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
(p6) br.cond.spnt.few 1b // yes -> retry
#ifdef CONFIG_SMP
- // __ticket_spin_unlock(r31)
- st2.rel [r31]=r20
- mov r20=0 // i must not leak kernel bits...
+ st4.rel [r31]=r0 // release the lock
#endif
SSM_PSR_I(p0, p9, r31)
;;
@@ -523,8 +512,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
.sig_pending:
#ifdef CONFIG_SMP
- // __ticket_spin_unlock(r31)
- st2.rel [r31]=r20 // release the lock
+ st4.rel [r31]=r0 // release the lock
#endif
SSM_PSR_I(p0, p9, r17)
;;
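[Editor's sketch] The replacement assembly above implements the simplest possible spinlock: cmpxchg4.acq attempts to swap the lock word from 0 to 1 with acquire semantics, a non-zero result branches to the contention path, and unlocking is a single release store of 0 (st4.rel). In C11 atomics the same protocol looks roughly like this — an illustrative model, not the kernel's spinlock code:

    #include <stdatomic.h>

    typedef atomic_uint simple_lock_t;

    static int simple_trylock(simple_lock_t *lock)
    {
        unsigned expected = 0;

        /* acquire on success, like cmpxchg4.acq; fails if already held */
        return atomic_compare_exchange_strong_explicit(
                lock, &expected, 1,
                memory_order_acquire, memory_order_relaxed);
    }

    static void simple_unlock(simple_lock_t *lock)
    {
        /* plain release store of 0, like st4.rel [r31]=r0 */
        atomic_store_explicit(lock, 0, memory_order_release);
    }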
diff --git a/trunk/arch/m32r/include/asm/elf.h b/trunk/arch/m32r/include/asm/elf.h
index b8da7d0574d2..2f85412ef730 100644
--- a/trunk/arch/m32r/include/asm/elf.h
+++ b/trunk/arch/m32r/include/asm/elf.h
@@ -82,9 +82,9 @@ typedef elf_fpreg_t elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
-#if defined(__LITTLE_ENDIAN__)
+#if defined(__LITTLE_ENDIAN)
#define ELF_DATA ELFDATA2LSB
-#elif defined(__BIG_ENDIAN__)
+#elif defined(__BIG_ENDIAN)
#define ELF_DATA ELFDATA2MSB
#else
#error no endian defined
diff --git a/trunk/arch/m32r/include/asm/signal.h b/trunk/arch/m32r/include/asm/signal.h
index b2eeb0de1c8d..9c1acb2b1a92 100644
--- a/trunk/arch/m32r/include/asm/signal.h
+++ b/trunk/arch/m32r/include/asm/signal.h
@@ -157,6 +157,7 @@ typedef struct sigaltstack {
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
+extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
diff --git a/trunk/arch/m32r/include/asm/unistd.h b/trunk/arch/m32r/include/asm/unistd.h
index c70545689da8..76125777483c 100644
--- a/trunk/arch/m32r/include/asm/unistd.h
+++ b/trunk/arch/m32r/include/asm/unistd.h
@@ -351,7 +351,6 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __IGNORE_lchown
#define __IGNORE_setuid
diff --git a/trunk/arch/m32r/kernel/.gitignore b/trunk/arch/m32r/kernel/.gitignore
deleted file mode 100644
index c5f676c3c224..000000000000
--- a/trunk/arch/m32r/kernel/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vmlinux.lds
diff --git a/trunk/arch/m32r/kernel/entry.S b/trunk/arch/m32r/kernel/entry.S
index 225412bc227e..403869833b98 100644
--- a/trunk/arch/m32r/kernel/entry.S
+++ b/trunk/arch/m32r/kernel/entry.S
@@ -235,9 +235,10 @@ work_resched:
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
- mv r1, r9 ; arg2 : __u32 thread_info_flags
+ ldi r1, #0 ; arg2 : sigset_t *oldset
+ mv r2, r9 ; arg3 : __u32 thread_info_flags
bl do_notify_resume
- bra resume_userspace
+ bra restore_all
; perform syscall exit tracing
ALIGN
diff --git a/trunk/arch/m32r/kernel/ptrace.c b/trunk/arch/m32r/kernel/ptrace.c
index 0021ade4cba8..e555091eb97c 100644
--- a/trunk/arch/m32r/kernel/ptrace.c
+++ b/trunk/arch/m32r/kernel/ptrace.c
@@ -592,17 +592,16 @@ void user_enable_single_step(struct task_struct *child)
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
- return -EIO;
+ break;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
- return -EIO;
+ break;
if (embed_debug_trap(child, next_pc))
- return -EIO;
+ break;
invalidate_cache();
- return 0;
}
void user_disable_single_step(struct task_struct *child)
diff --git a/trunk/arch/m32r/kernel/signal.c b/trunk/arch/m32r/kernel/signal.c
index a08697f0886d..144b0f124fc7 100644
--- a/trunk/arch/m32r/kernel/signal.c
+++ b/trunk/arch/m32r/kernel/signal.c
@@ -30,6 +30,35 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+int do_signal(struct pt_regs *, sigset_t *);
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
+ unsigned long r2, unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6, struct pt_regs *regs)
+{
+ sigset_t newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+ spin_lock_irq(&current->sighand->siglock);
+ current->saved_sigmask = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
+}
+
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
@@ -189,7 +218,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@@ -246,34 +275,22 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc);
#endif
- return 0;
+ return;
give_sigsegv:
force_sigsegv(sig, current);
- return -EFAULT;
-}
-
-static int prev_insn(struct pt_regs *regs)
-{
- u16 inst;
- if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
- return -EFAULT;
- if ((inst & 0xfff0) == 0x10f0) /* trap ? */
- regs->bpc -= 2;
- else
- regs->bpc -= 4;
- regs->syscall_nr = -1;
- return 0;
}
/*
* OK, we're invoking a handler
*/
-static int
+static void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ unsigned short inst;
+
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
@@ -291,14 +308,16 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
- if (prev_insn(regs) < 0)
- return -EFAULT;
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
}
}
/* Set up the stack frame */
- if (setup_rt_frame(sig, ka, info, oldset, regs))
- return -EFAULT;
+ setup_rt_frame(sig, ka, info, oldset, regs);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -306,7 +325,6 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- return 0;
}
/*
@@ -314,12 +332,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-static void do_signal(struct pt_regs *regs)
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
- sigset_t *oldset;
+ unsigned short inst;
/*
* We want the common case to go fast, which
@@ -328,14 +346,12 @@ static void do_signal(struct pt_regs *regs)
* if so.
*/
if (!user_mode(regs))
- return;
+ return 1;
if (try_to_freeze())
goto no_signal;
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
+ if (!oldset)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -347,10 +363,8 @@ static void do_signal(struct pt_regs *regs)
*/
/* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
- return;
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
}
no_signal:
@@ -361,24 +375,31 @@ static void do_signal(struct pt_regs *regs)
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
- prev_insn(regs);
- } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
+ }
+ if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
- prev_insn(regs);
+ inst = *(unsigned short *)(regs->bpc - 2);
+ if ((inst & 0xfff0) == 0x10f0) /* trap ? */
+ regs->bpc -= 2;
+ else
+ regs->bpc -= 4;
}
}
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
+ return 0;
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+ __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
@@ -386,7 +407,7 @@ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
+ do_signal(regs,oldset);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
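[Editor's sketch] The restart sequence repeated through this file rewinds the PC over whatever instruction entered the kernel: m32r encodes the syscall trap in a 16-bit instruction whose top twelve bits are 0x10f, so the code fetches the halfword at bpc - 2 and backs up 2 bytes for a trap, 4 bytes otherwise. A standalone sketch of just that decision:

    #include <stdint.h>

    /* inst is the 16-bit halfword fetched from bpc - 2; returns how far
     * to rewind the PC so the syscall gets re-executed. */
    static unsigned restart_rewind(uint16_t inst)
    {
        return ((inst & 0xfff0) == 0x10f0) ? 2 : 4;   /* trap ? */
    }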
diff --git a/trunk/arch/m68k/include/asm/unistd.h b/trunk/arch/m68k/include/asm/unistd.h
index b43b36beafe3..60b15d0aa072 100644
--- a/trunk/arch/m68k/include/asm/unistd.h
+++ b/trunk/arch/m68k/include/asm/unistd.h
@@ -340,13 +340,10 @@
#define __NR_set_thread_area 334
#define __NR_atomic_cmpxchg_32 335
#define __NR_atomic_barrier 336
-#define __NR_fanotify_init 337
-#define __NR_fanotify_mark 338
-#define __NR_prlimit64 339
#ifdef __KERNEL__
-#define NR_syscalls 340
+#define NR_syscalls 337
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/trunk/arch/m68k/kernel/entry.S b/trunk/arch/m68k/kernel/entry.S
index 6360c437dcf5..2391bdff0996 100644
--- a/trunk/arch/m68k/kernel/entry.S
+++ b/trunk/arch/m68k/kernel/entry.S
@@ -765,7 +765,4 @@ sys_call_table:
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
diff --git a/trunk/arch/m68k/mac/macboing.c b/trunk/arch/m68k/mac/macboing.c
index 05285d08e547..8f0640847ad2 100644
--- a/trunk/arch/m68k/mac/macboing.c
+++ b/trunk/arch/m68k/mac/macboing.c
@@ -162,7 +162,7 @@ static void mac_init_asc( void )
void mac_mksound( unsigned int freq, unsigned int length )
{
__u32 cfreq = ( freq << 5 ) / 468;
- unsigned long flags;
+ __u32 flags;
int i;
if ( mac_special_bell == NULL )
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored )
*/
static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume )
{
- unsigned long flags;
+ __u32 flags;
/* if the bell is already ringing, ring longer */
if ( mac_bell_duration > 0 )
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig
static void mac_quadra_ring_bell( unsigned long ignored )
{
int i, count = mac_asc_samplespersec / HZ;
- unsigned long flags;
+ __u32 flags;
/*
* we neither want a sound buffer overflow nor underflow, so we need to match
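[Editor's note] For reference, interrupt-state flags in the kernel are conventionally declared unsigned long, because local_irq_save() stashes the architecture's status word in its argument; narrowing the variable to __u32, as these hunks do, only works while that status fits in 32 bits. The canonical pattern is:

    unsigned long flags;

    local_irq_save(flags);      /* disable local interrupts, remember state */
    /* ... touch the shared state or hardware ... */
    local_irq_restore(flags);   /* put the previous interrupt state back */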
diff --git a/trunk/arch/m68knommu/kernel/syscalltable.S b/trunk/arch/m68knommu/kernel/syscalltable.S
index 79b1ed198c07..b30b3eb197a5 100644
--- a/trunk/arch/m68knommu/kernel/syscalltable.S
+++ b/trunk/arch/m68knommu/kernel/syscalltable.S
@@ -355,9 +355,6 @@ ENTRY(sys_call_table)
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
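[Editor's sketch] The .rept directive at the end pads the table: each .long entry is 4 bytes, so (. - sys_call_table)/4 counts the entries emitted so far, and every remaining slot up to NR_syscalls is filled with sys_ni_syscall so stray syscall numbers hit a safe stub. The equivalent C, as a schematic (names mirror the assembly, not a real kernel file):

    #define NR_SYSCALLS 337

    typedef long (*syscall_fn)(void);

    extern long sys_ni_syscall(void);            /* "not implemented" stub */
    extern syscall_fn sys_call_table[NR_SYSCALLS];

    static void pad_syscall_table(int entries_emitted)
    {
        for (int i = entries_emitted; i < NR_SYSCALLS; i++)
            sys_call_table[i] = sys_ni_syscall;
    }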
diff --git a/trunk/arch/mips/Kbuild b/trunk/arch/mips/Kbuild
index 7dd65cfae837..e322d65f33a4 100644
--- a/trunk/arch/mips/Kbuild
+++ b/trunk/arch/mips/Kbuild
@@ -7,10 +7,6 @@ subdir-ccflags-y := -Werror
include arch/mips/Kbuild.platforms
obj-y := $(platform-y)
-# make clean traverses $(obj-) without having included .config, so
-# everything ends up here
-obj- := $(platform-)
-
# mips object files
# The object files are linked as core-y files would be linked
diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig
index 4c9f402295dd..3ad59dde4852 100644
--- a/trunk/arch/mips/Kconfig
+++ b/trunk/arch/mips/Kconfig
@@ -13,7 +13,6 @@ config MIPS
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
- select GENERIC_ATOMIC64 if !64BIT
mainmenu "Linux/MIPS Kernel Configuration"
@@ -881,15 +880,11 @@ config NO_IOPORT
config GENERIC_ISA_DMA
bool
select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
- select ISA_DMA_API
config GENERIC_ISA_DMA_SUPPORT_BROKEN
bool
select GENERIC_ISA_DMA
-config ISA_DMA_API
- bool
-
config GENERIC_GPIO
bool
@@ -1651,16 +1646,8 @@ config MIPS_MT_SMP
select SYS_SUPPORTS_SMP
select SMP_UP
help
- This is a kernel model which is known a VSMP but lately has been
- marketesed into SMVP.
- Virtual SMP uses the processor's VPEs to implement virtual
- processors. In currently available configuration of the 34K processor
- this allows for a dual processor. Both processors will share the same
- primary caches; each will obtain the half of the TLB for it's own
- exclusive use. For a layman this model can be described as similar to
- what Intel calls Hyperthreading.
-
- For further information see http://www.linux-mips.org/wiki/34K#VSMP
+ This is a kernel model which is also known a VSMP or lately
+ has been marketesed into SMVP.
config MIPS_MT_SMTC
bool "SMTC: Use all TCs on all VPEs for SMP"
@@ -1677,14 +1664,6 @@ config MIPS_MT_SMTC
help
This is a kernel model which is known a SMTC or lately has been
marketesed into SMVP.
- is presenting the available TC's of the core as processors to Linux.
- On currently available 34K processors this means a Linux system will
- see up to 5 processors. The implementation of the SMTC kernel differs
- significantly from VSMP and cannot efficiently coexist in the same
- kernel binary so the choice between VSMP and SMTC is a compile time
- decision.
-
- For further information see http://www.linux-mips.org/wiki/34K#SMTC
endchoice
diff --git a/trunk/arch/mips/alchemy/common/prom.c b/trunk/arch/mips/alchemy/common/prom.c
index 534021059629..c29511b11d44 100644
--- a/trunk/arch/mips/alchemy/common/prom.c
+++ b/trunk/arch/mips/alchemy/common/prom.c
@@ -43,7 +43,7 @@ int prom_argc;
char **prom_argv;
char **prom_envp;
-void __init prom_init_cmdline(void)
+void prom_init_cmdline(void)
{
int i;
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
}
}
-int __init prom_get_ethernet_addr(char *ethernet_addr)
+int prom_get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
@@ -123,6 +123,7 @@ int __init prom_get_ethernet_addr(char *ethernet_addr)
return 0;
}
+EXPORT_SYMBOL(prom_get_ethernet_addr);
void __init prom_free_prom_memory(void)
{
diff --git a/trunk/arch/mips/boot/compressed/Makefile b/trunk/arch/mips/boot/compressed/Makefile
index 5042d51b0512..ed9bb709c9a3 100644
--- a/trunk/arch/mips/boot/compressed/Makefile
+++ b/trunk/arch/mips/boot/compressed/Makefile
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
hostprogs-y := calc_vmlinuz_load_addr
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
+ $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS))
vmlinuzobjs-y += $(obj)/piggy.o
@@ -105,4 +105,4 @@ OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec
vmlinuz.srec: vmlinuz
$(call cmd,objcopy)
-clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
+clean-files := $(objtree)/vmlinuz.*
diff --git a/trunk/arch/mips/cavium-octeon/Kconfig b/trunk/arch/mips/cavium-octeon/Kconfig
index 47323ca452dc..094c17e38e16 100644
--- a/trunk/arch/mips/cavium-octeon/Kconfig
+++ b/trunk/arch/mips/cavium-octeon/Kconfig
@@ -83,7 +83,3 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_STATIC
depends on CPU_CAVIUM_OCTEON
-
-config CAVIUM_OCTEON_HELPER
- def_bool y
- depends on OCTEON_ETHERNET || PCI
diff --git a/trunk/arch/mips/cavium-octeon/cpu.c b/trunk/arch/mips/cavium-octeon/cpu.c
index a5b427909b5c..c664c8cc2b42 100644
--- a/trunk/arch/mips/cavium-octeon/cpu.c
+++ b/trunk/arch/mips/cavium-octeon/cpu.c
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK; /* Let default notifier send signals */
}
-static int __init cnmips_cu2_setup(void)
+static int cnmips_cu2_setup(void)
{
return cu2_notifier(cnmips_cu2_call, 0);
}
diff --git a/trunk/arch/mips/cavium-octeon/executive/Makefile b/trunk/arch/mips/cavium-octeon/executive/Makefile
index 7f41c5be2190..2fd66db6939e 100644
--- a/trunk/arch/mips/cavium-octeon/executive/Makefile
+++ b/trunk/arch/mips/cavium-octeon/executive/Makefile
@@ -11,4 +11,4 @@
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
-obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o
+obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o
diff --git a/trunk/arch/mips/dec/Platform b/trunk/arch/mips/dec/Platform
index cf55a6f4e720..3adbcbd95db1 100644
--- a/trunk/arch/mips/dec/Platform
+++ b/trunk/arch/mips/dec/Platform
@@ -1,7 +1,7 @@
#
# DECstation family
#
-platform-$(CONFIG_MACH_DECSTATION) += dec/
+platform-$(CONFIG_MACH_DECSTATION) = dec/
cflags-$(CONFIG_MACH_DECSTATION) += \
-I$(srctree)/arch/mips/include/asm/mach-dec
libs-$(CONFIG_MACH_DECSTATION) += arch/mips/dec/prom/
diff --git a/trunk/arch/mips/include/asm/atomic.h b/trunk/arch/mips/include/asm/atomic.h
index 47d87da379f9..c63c56bfd184 100644
--- a/trunk/arch/mips/include/asm/atomic.h
+++ b/trunk/arch/mips/include/asm/atomic.h
@@ -782,10 +782,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-#else /* !CONFIG_64BIT */
-
-#include
-
#endif /* CONFIG_64BIT */
/*
diff --git a/trunk/arch/mips/include/asm/compat.h b/trunk/arch/mips/include/asm/compat.h
index dbc51065df5b..613f6912dfc1 100644
--- a/trunk/arch/mips/include/asm/compat.h
+++ b/trunk/arch/mips/include/asm/compat.h
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
diff --git a/trunk/arch/mips/include/asm/cop2.h b/trunk/arch/mips/include/asm/cop2.h
index 3532e2c5f098..2cb2f0c2c4f8 100644
--- a/trunk/arch/mips/include/asm/cop2.h
+++ b/trunk/arch/mips/include/asm/cop2.h
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v);
#define cu2_notifier(fn, pri) \
({ \
- static struct notifier_block fn##_nb = { \
+ static struct notifier_block fn##_nb __cpuinitdata = { \
.notifier_call = fn, \
.priority = pri \
}; \
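[Editor's note] The cu2_notifier() macro above token-pastes the handler name to build a uniquely named static notifier_block (fn##_nb) before putting it on the coprocessor-2 chain. The Octeon cpu.c hunk earlier uses it as cu2_notifier(cnmips_cu2_call, 0), which expands to roughly:

    static struct notifier_block cnmips_cu2_call_nb = {
        .notifier_call = cnmips_cu2_call,
        .priority      = 0,
    };
    /* ...followed by registering &cnmips_cu2_call_nb on the CU2 notifier
     * chain; that part of the macro body lies outside the quoted hunk. */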
diff --git a/trunk/arch/mips/include/asm/fcntl.h b/trunk/arch/mips/include/asm/fcntl.h
index 75eddedcfc3e..e482fe90fe88 100644
--- a/trunk/arch/mips/include/asm/fcntl.h
+++ b/trunk/arch/mips/include/asm/fcntl.h
@@ -56,7 +56,6 @@
*/
#ifdef CONFIG_32BIT
-#include
struct flock {
short l_type;
diff --git a/trunk/arch/mips/include/asm/gic.h b/trunk/arch/mips/include/asm/gic.h
index 86548da650e7..9b9436a4d816 100644
--- a/trunk/arch/mips/include/asm/gic.h
+++ b/trunk/arch/mips/include/asm/gic.h
@@ -321,7 +321,6 @@ struct gic_intrmask_regs {
*/
struct gic_intr_map {
unsigned int cpunum; /* Directed to this CPU */
-#define GIC_UNUSED 0xdead /* Dummy data */
unsigned int pin; /* Directed to this Pin */
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
diff --git a/trunk/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/trunk/arch/mips/include/asm/mach-tx49xx/kmalloc.h
index ff9a8b86cb93..b74caf65482b 100644
--- a/trunk/arch/mips/include/asm/mach-tx49xx/kmalloc.h
+++ b/trunk/arch/mips/include/asm/mach-tx49xx/kmalloc.h
@@ -1,6 +1,6 @@
#ifndef __ASM_MACH_TX49XX_KMALLOC_H
#define __ASM_MACH_TX49XX_KMALLOC_H
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#endif /* __ASM_MACH_TX49XX_KMALLOC_H */
diff --git a/trunk/arch/mips/include/asm/mips-boards/maltaint.h b/trunk/arch/mips/include/asm/mips-boards/maltaint.h
index d11aa02a956a..cea872fc6f5c 100644
--- a/trunk/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/trunk/arch/mips/include/asm/mips-boards/maltaint.h
@@ -88,6 +88,9 @@
#define GIC_EXT_INTR(x) x
+/* Dummy data */
+#define X 0xdead
+
/* External Interrupts used for IPI */
#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16
#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
diff --git a/trunk/arch/mips/include/asm/page.h b/trunk/arch/mips/include/asm/page.h
index e59cd1ac09c2..a16beafcea91 100644
--- a/trunk/arch/mips/include/asm/page.h
+++ b/trunk/arch/mips/include/asm/page.h
@@ -150,20 +150,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#endif
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
-
-/*
- * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
- * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
- * discussion can be found in lkml posting
- * which is
- * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
- *
- * It is unclear if the misscompilations mentioned in
- * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
- * until GCC 3.x has been retired before we can apply
- * https://patchwork.linux-mips.org/patch/1541/
- */
-
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
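[Editor's sketch] __pa()/__va() above are pure arithmetic: a kernel virtual address and its physical address differ by the constant PAGE_OFFSET - PHYS_OFFSET. A worked standalone model with illustrative constants (KSEG0-style values, chosen only for the example):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0x80000000UL   /* illustrative kernel base */
    #define PHYS_OFFSET 0x00000000UL   /* illustrative RAM start */

    static uintptr_t model_pa(uintptr_t va)
    {
        return va - PAGE_OFFSET + PHYS_OFFSET;
    }

    static uintptr_t model_va(uintptr_t pa)
    {
        return pa + PAGE_OFFSET - PHYS_OFFSET;
    }

    int main(void)
    {
        /* virtual 0x80001000 <-> physical 0x00001000 */
        assert(model_pa(0x80001000UL) == 0x00001000UL);
        assert(model_va(0x00001000UL) == 0x80001000UL);
        return 0;
    }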
diff --git a/trunk/arch/mips/include/asm/siginfo.h b/trunk/arch/mips/include/asm/siginfo.h
index 1ca64b4d33d9..96e28f18dad1 100644
--- a/trunk/arch/mips/include/asm/siginfo.h
+++ b/trunk/arch/mips/include/asm/siginfo.h
@@ -88,7 +88,6 @@ typedef struct siginfo {
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
- short _addr_lsb;
} _sigfault;
/* SIGPOLL, SIGXFSZ (To do ...) */
diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h
index 70df9c0d3c5b..2376f2e06e47 100644
--- a/trunk/arch/mips/include/asm/thread_info.h
+++ b/trunk/arch/mips/include/asm/thread_info.h
@@ -146,8 +146,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_LOAD_WATCH (1<regs[0] = 0;
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
diff --git a/trunk/arch/mips/kernel/irq-gic.c b/trunk/arch/mips/kernel/irq-gic.c
index 82ba9f62f49e..b181f2f0ea8e 100644
--- a/trunk/arch/mips/kernel/irq-gic.c
+++ b/trunk/arch/mips/kernel/irq-gic.c
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -130,7 +131,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
int i;
irq -= _irqbase;
- pr_debug("%s(%d) called\n", __func__, irq);
+ pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq);
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
@@ -221,7 +222,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
- if (cpu == GIC_UNUSED)
+ if (cpu == X)
continue;
if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
continue;
diff --git a/trunk/arch/mips/kernel/kgdb.c b/trunk/arch/mips/kernel/kgdb.c
index f4546e97c60d..1f4e2fa64140 100644
--- a/trunk/arch/mips/kernel/kgdb.c
+++ b/trunk/arch/mips/kernel/kgdb.c
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
- /* Userspace events, ignore. */
+ /* Userpace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
diff --git a/trunk/arch/mips/kernel/kspd.c b/trunk/arch/mips/kernel/kspd.c
index 29811f043399..80e2ba694bab 100644
--- a/trunk/arch/mips/kernel/kspd.c
+++ b/trunk/arch/mips/kernel/kspd.c
@@ -251,7 +251,7 @@ void sp_work_handle_request(void)
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0, 0)) == 0)
- ret.retval = tv.tv_sec;
+ ret.retval = tv.tv_sec;
break;
case MTSP_SYSCALL_EXIT:
diff --git a/trunk/arch/mips/kernel/linux32.c b/trunk/arch/mips/kernel/linux32.c
index 6343b4a5b835..c2dab140dc98 100644
--- a/trunk/arch/mips/kernel/linux32.c
+++ b/trunk/arch/mips/kernel/linux32.c
@@ -341,10 +341,3 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
{
return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
}
-
-SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
- u64, a3, u64, a4, int, dfd, const char __user *, pathname)
-{
- return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
- dfd, pathname);
-}
diff --git a/trunk/arch/mips/kernel/mips-mt-fpaff.c b/trunk/arch/mips/kernel/mips-mt-fpaff.c
index 9a526ba6f257..2340f11dc29c 100644
--- a/trunk/arch/mips/kernel/mips-mt-fpaff.c
+++ b/trunk/arch/mips/kernel/mips-mt-fpaff.c
@@ -103,7 +103,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- retval = security_task_setscheduler(p);
+ retval = security_task_setscheduler(p, 0, NULL);
if (retval)
goto out_unlock;
diff --git a/trunk/arch/mips/kernel/ptrace.c b/trunk/arch/mips/kernel/ptrace.c
index c8777333e198..c51b95ff8644 100644
--- a/trunk/arch/mips/kernel/ptrace.c
+++ b/trunk/arch/mips/kernel/ptrace.c
@@ -536,7 +536,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
/* do the secure computing check first */
if (!entryexit)
- secure_computing(regs->regs[2]);
+ secure_computing(regs->regs[0]);
if (unlikely(current->audit_context) && entryexit)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
@@ -565,7 +565,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
out:
if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[2],
+ audit_syscall_entry(audit_arch(), regs->regs[0],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
}
diff --git a/trunk/arch/mips/kernel/scall32-o32.S b/trunk/arch/mips/kernel/scall32-o32.S
index fbaabad0e6e2..17202bbe843f 100644
--- a/trunk/arch/mips/kernel/scall32-o32.S
+++ b/trunk/arch/mips/kernel/scall32-o32.S
@@ -63,9 +63,9 @@ stack_done:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
- lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw t1, PT_R0(sp) # save it for syscall restarting
+ sw v0, PT_R0(sp) # set flag for syscall
+ # restarting
1: sw v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -104,9 +104,9 @@ syscall_trace_entry:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
- lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw t1, PT_R0(sp) # save it for syscall restarting
+ sw v0, PT_R0(sp) # set flag for syscall
+ # restarting
1: sw v0, PT_R2(sp) # result
j syscall_exit
@@ -169,7 +169,8 @@ stackargs:
* We probably should handle this case a bit more drastic.
*/
bad_stack:
- li v0, EFAULT
+ negu v0 # error
+ sw v0, PT_R0(sp)
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
@@ -582,10 +583,7 @@ einval: li v0, -ENOSYS
sys sys_rt_tgsigqueueinfo 4
sys sys_perf_event_open 5
sys sys_accept4 4
- sys sys_recvmmsg 5 /* 4335 */
- sys sys_fanotify_init 2
- sys sys_fanotify_mark 6
- sys sys_prlimit64 4
+ sys sys_recvmmsg 5
.endm
/* We pre-compute the number of _instruction_ bytes needed to
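[Editor's sketch] Both versions of the error path above implement the o32 return convention: a3 (PT_R7) carries an error flag, and negu turns the handler's negative -errno return into a positive errno in v0 (PT_R2); the versions differ only in what gets stashed in PT_R0 for syscall restarting. Decoded from the user side, the convention looks roughly like this (a sketch of the ABI, not libc source):

    /* v0 and a3 as user space sees them after the handler returns */
    static long decode_o32_result(unsigned long v0, unsigned long a3, int *err)
    {
        if (a3) {              /* error: v0 holds the positive errno */
            *err = (int)v0;
            return -1;
        }
        *err = 0;
        return (long)v0;       /* success: v0 is the syscall result */
    }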
diff --git a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S
index 3f4179283207..a8a6c596eb04 100644
--- a/trunk/arch/mips/kernel/scall64-64.S
+++ b/trunk/arch/mips/kernel/scall64-64.S
@@ -66,9 +66,9 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # set flag for syscall
+ # restarting
1: sd v0, PT_R2(sp) # result
n64_syscall_exit:
@@ -109,9 +109,8 @@ syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -417,12 +416,9 @@ sys_call_table:
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
- PTR sys_pwritev /* 5290 */
+ PTR sys_pwritev /* 5390 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR sys_recvmmsg
- PTR sys_fanotify_init /* 5295 */
- PTR sys_fanotify_mark
- PTR sys_prlimit64
+ PTR sys_recvmmsg
.size sys_call_table,.-sys_call_table
diff --git a/trunk/arch/mips/kernel/scall64-n32.S b/trunk/arch/mips/kernel/scall64-n32.S
index f08ece6d8acc..a3d66137731a 100644
--- a/trunk/arch/mips/kernel/scall64-n32.S
+++ b/trunk/arch/mips/kernel/scall64-n32.S
@@ -65,9 +65,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
local_irq_disable # make sure need_resched and
@@ -107,9 +106,8 @@ n32_syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -322,10 +320,10 @@ EXPORT(sysn32_call_table)
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
- PTR compat_sys_io_setup /* 6200 */
+ PTR sys_io_setup /* 6200 */
PTR sys_io_destroy
- PTR compat_sys_io_getevents
- PTR compat_sys_io_submit
+ PTR sys_io_getevents
+ PTR sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 6205 */
PTR sys_lookup_dcookie
@@ -421,8 +419,5 @@ EXPORT(sysn32_call_table)
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
- PTR sys_getdents64
- PTR sys_fanotify_init /* 6300 */
- PTR sys_fanotify_mark
- PTR sys_prlimit64
+ PTR sys_getdents
.size sysn32_call_table,.-sysn32_call_table
diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S
index 78d768a3e19d..813689ef2384 100644
--- a/trunk/arch/mips/kernel/scall64-o32.S
+++ b/trunk/arch/mips/kernel/scall64-o32.S
@@ -93,9 +93,8 @@ NESTED(handle_sys, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # flag for syscall restarting
1: sd v0, PT_R2(sp) # result
o32_syscall_exit:
@@ -143,9 +142,8 @@ trace_a_syscall:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
- ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
+ sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit
@@ -156,7 +154,8 @@ trace_a_syscall:
* The stackpointer for a call with more than 4 arguments is bad.
*/
bad_stack:
- li v0, EFAULT
+ dnegu v0 # error
+ sd v0, PT_R0(sp)
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
@@ -445,10 +444,10 @@ sys_call_table:
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
- PTR compat_sys_io_setup
+ PTR sys_io_setup
PTR sys_io_destroy
- PTR compat_sys_io_getevents
- PTR compat_sys_io_submit
+ PTR sys_io_getevents
+ PTR sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
PTR sys32_lookup_dcookie
@@ -539,8 +538,5 @@ sys_call_table:
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
- PTR compat_sys_recvmmsg /* 4335 */
- PTR sys_fanotify_init
- PTR sys_32_fanotify_mark
- PTR sys_prlimit64
+ PTR compat_sys_recvmmsg
.size sys_call_table,.-sys_call_table
diff --git a/trunk/arch/mips/kernel/signal.c b/trunk/arch/mips/kernel/signal.c
index 5922342bca39..2099d5a4c4b7 100644
--- a/trunk/arch/mips/kernel/signal.c
+++ b/trunk/arch/mips/kernel/signal.c
@@ -390,6 +390,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
+ stack_t st;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -410,9 +411,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
else if (sig)
force_sig(sig, current);
+ if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
+ goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
+ do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
@@ -547,27 +550,24 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
- if (regs->regs[0]) {
- switch(regs->regs[2]) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
+ switch(regs->regs[0]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
+ regs->regs[2] = EINTR;
+ break;
+ case ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
- case ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[2] = EINTR;
- break;
- }
- /* fallthrough */
- case ERESTARTNOINTR:
- regs->regs[7] = regs->regs[26];
- regs->regs[2] = regs->regs[0];
- regs->cp0_epc -= 4;
}
-
- regs->regs[0] = 0; /* Don't deal with this again. */
+ /* fallthrough */
+ case ERESTARTNOINTR: /* Userland will reload $v0. */
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 8;
}
+ regs->regs[0] = 0; /* Don't deal with this again. */
+
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
ka, regs, sig, oldset, info);
@@ -575,9 +575,6 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
ret = abi->setup_frame(vdso + abi->signal_return_offset,
ka, regs, sig, oldset);
- if (ret)
- return ret;
-
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -625,13 +622,17 @@ static void do_signal(struct pt_regs *regs)
return;
}
+ /*
+ * Who's code doesn't conform to the restartable syscall convention
+ * dies here!!! The li instruction, a single machine instruction,
+ * must directly be followed by the syscall instruction.
+ */
if (regs->regs[0]) {
if (regs->regs[2] == ERESTARTNOHAND ||
regs->regs[2] == ERESTARTSYS ||
regs->regs[2] == ERESTARTNOINTR) {
- regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 4;
+ regs->cp0_epc -= 8;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
regs->regs[2] = current->thread.abi->restart;
diff --git a/trunk/arch/mips/kernel/signal_n32.c b/trunk/arch/mips/kernel/signal_n32.c
index ee24d814d5b9..2c5df818c65a 100644
--- a/trunk/arch/mips/kernel/signal_n32.c
+++ b/trunk/arch/mips/kernel/signal_n32.c
@@ -109,7 +109,6 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
- mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
@@ -144,11 +143,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
- set_fs(old_fs);
-
/*
* Don't let your children do this ...
diff --git a/trunk/arch/mips/kernel/unaligned.c b/trunk/arch/mips/kernel/unaligned.c
index 33d5a5ce4a29..69b039ca8d83 100644
--- a/trunk/arch/mips/kernel/unaligned.c
+++ b/trunk/arch/mips/kernel/unaligned.c
@@ -109,6 +109,8 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value;
unsigned int res;
+ regs->regs[0] = 0;
+
/*
* This load never faults.
*/
diff --git a/trunk/arch/mips/mm/dma-default.c b/trunk/arch/mips/mm/dma-default.c
index 469d4019f795..7ba890860d98 100644
--- a/trunk/arch/mips/mm/dma-default.c
+++ b/trunk/arch/mips/mm/dma-default.c
@@ -44,39 +44,27 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
- gfp_t dma_flag;
-
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ZONE_DMA
if (dev == NULL)
- dma_flag = __GFP_DMA;
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
else
#endif
-#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
+#ifdef CONFIG_ZONE_DMA32
if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- dma_flag = __GFP_DMA;
- else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA32;
- else
-#endif
-#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
- dma_flag = __GFP_DMA;
+ gfp |= __GFP_DMA32;
else
#endif
- dma_flag = 0;
+ ;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
- return gfp | dma_flag;
+ return gfp;
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
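[Editor's sketch] massage_gfp_flags() maps the device's coherent DMA mask onto a GFP zone: a device that cannot address 16 MiB (24 mask bits) needs __GFP_DMA, one that cannot address 4 GiB needs __GFP_DMA32, and anything wider can allocate from any zone. A standalone model of just the mask comparison — the flag values and the simplified DMA_BIT_MASK() are illustrative:

    #include <stdint.h>

    #define MODEL_GFP_DMA   0x1u            /* stand-in for __GFP_DMA */
    #define MODEL_GFP_DMA32 0x2u            /* stand-in for __GFP_DMA32 */

    #define DMA_BIT_MASK(n) (((uint64_t)1 << (n)) - 1)  /* valid for n < 64 */

    static unsigned pick_zone(uint64_t coherent_dma_mask)
    {
        if (coherent_dma_mask < DMA_BIT_MASK(24))
            return MODEL_GFP_DMA;           /* only the lowest 16 MiB */
        if (coherent_dma_mask < DMA_BIT_MASK(32))
            return MODEL_GFP_DMA32;         /* only the lowest 4 GiB */
        return 0;                           /* any zone will do */
    }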
diff --git a/trunk/arch/mips/mm/sc-rm7k.c b/trunk/arch/mips/mm/sc-rm7k.c
index 274af3be1442..1ef75cd80a0d 100644
--- a/trunk/arch/mips/mm/sc-rm7k.c
+++ b/trunk/arch/mips/mm/sc-rm7k.c
@@ -30,7 +30,7 @@
#define tc_lsize 32
extern unsigned long icache_way_size, dcache_way_size;
-static unsigned long tcache_size;
+unsigned long tcache_size;
#include
diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c
index b79b24afe3a2..15949b0be811 100644
--- a/trunk/arch/mips/mti-malta/malta-int.c
+++ b/trunk/arch/mips/mti-malta/malta-int.c
@@ -385,8 +385,6 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
*/
#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
-#define X GIC_UNUSED
-
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
{ X, X, X, X, 0 },
@@ -406,7 +404,6 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
-#undef X
/*
* GCMP needs to be detected before any SMP initialisation
diff --git a/trunk/arch/mips/pci/pci-rc32434.c b/trunk/arch/mips/pci/pci-rc32434.c
index f31218e17d3c..71f7d27b0d4c 100644
--- a/trunk/arch/mips/pci/pci-rc32434.c
+++ b/trunk/arch/mips/pci/pci-rc32434.c
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void)
if (!((pcicvalue == PCIM_H_EA) ||
(pcicvalue == PCIM_H_IA_FIX) ||
(pcicvalue == PCIM_H_IA_RR))) {
- pr_err("PCI init error!!!\n");
+ pr_err(KERN_ERR "PCI init error!!!\n");
/* Not in Host Mode, return ERROR */
return -1;
}
diff --git a/trunk/arch/mips/pnx8550/common/reset.c b/trunk/arch/mips/pnx8550/common/reset.c
index e7a12ff304b9..fadd8744a6bc 100644
--- a/trunk/arch/mips/pnx8550/common/reset.c
+++ b/trunk/arch/mips/pnx8550/common/reset.c
@@ -22,19 +22,29 @@
*/
#include
-#include
#include
#include
void pnx8550_machine_restart(char *command)
{
+ char head[] = "************* Machine restart *************";
+ char foot[] = "*******************************************";
+
+ printk("\n\n");
+ printk("%s\n", head);
+ if (command != NULL)
+ printk("* %s\n", command);
+ printk("%s\n", foot);
+
PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
}
void pnx8550_machine_halt(void)
{
- while (1) {
- if (cpu_wait)
- cpu_wait();
- }
+ printk("*** Machine halt. (Not implemented) ***\n");
+}
+
+void pnx8550_machine_power_off(void)
+{
+ printk("*** Machine power off. (Not implemented) ***\n");
}
diff --git a/trunk/arch/mips/pnx8550/common/setup.c b/trunk/arch/mips/pnx8550/common/setup.c
index 43cb3945fdbf..64246c9c875c 100644
--- a/trunk/arch/mips/pnx8550/common/setup.c
+++ b/trunk/arch/mips/pnx8550/common/setup.c
@@ -44,6 +44,7 @@
extern void __init board_setup(void);
extern void pnx8550_machine_restart(char *);
extern void pnx8550_machine_halt(void);
+extern void pnx8550_machine_power_off(void);
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern char *prom_getcmdline(void);
@@ -99,7 +100,7 @@ void __init plat_mem_setup(void)
_machine_restart = pnx8550_machine_restart;
_machine_halt = pnx8550_machine_halt;
- pm_power_off = pnx8550_machine_halt;
+ pm_power_off = pnx8550_machine_power_off;
/* Clear the Global 2 Register, PCI Inta Output Enable Registers
Bit 1:Enable DAC Powerdown
diff --git a/trunk/arch/mn10300/Kconfig b/trunk/arch/mn10300/Kconfig
index 7c2a2f7f8dc1..444b9f918fdf 100644
--- a/trunk/arch/mn10300/Kconfig
+++ b/trunk/arch/mn10300/Kconfig
@@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
config MN10300
def_bool y
select HAVE_OPROFILE
+ select HAVE_ARCH_TRACEHOOK
config AM33
def_bool y
diff --git a/trunk/arch/mn10300/Kconfig.debug b/trunk/arch/mn10300/Kconfig.debug
index ce83c74b3fd7..ff80e86b9bd2 100644
--- a/trunk/arch/mn10300/Kconfig.debug
+++ b/trunk/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
choice
prompt "GDB stub port"
- default GDBSTUB_ON_TTYSM0
+ default GDBSTUB_TTYSM0
depends on GDBSTUB
help
Select the serial port used for GDB-stub.
diff --git a/trunk/arch/mn10300/include/asm/bitops.h b/trunk/arch/mn10300/include/asm/bitops.h
index 3f50e9661076..f49ac49e09ad 100644
--- a/trunk/arch/mn10300/include/asm/bitops.h
+++ b/trunk/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@ int ffs(int x)
#include
#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (addr))
+ test_and_set_bit((nr) ^ 0x18, (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (addr))
+ test_and_clear_bit((nr) ^ 0x18, (addr))
#include
#include
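[Editor's sketch] The ^ 0x18 converts ext2's little-endian bit numbering for a big-endian 32-bit machine: bits 3 and 4 of a bit number select the byte within a 32-bit word, so XOR-ing them with 0b11000 mirrors the byte order while leaving the bit-within-byte and the word index untouched. A quick standalone check of the mapping:

    #include <assert.h>

    /* little-endian bitmap bit b -> bit position in a big-endian u32 word */
    static unsigned le_to_be32_bit(unsigned b)
    {
        return b ^ 0x18;   /* flips the byte index: 0<->3, 1<->2 */
    }

    int main(void)
    {
        assert(le_to_be32_bit(0)  == 24);  /* byte 0 -> word bits 31..24 */
        assert(le_to_be32_bit(7)  == 31);
        assert(le_to_be32_bit(8)  == 16);  /* byte 1 -> word bits 23..16 */
        assert(le_to_be32_bit(31) == 7);   /* byte 3 -> word bits 7..0 */
        return 0;
    }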
diff --git a/trunk/arch/mn10300/include/asm/signal.h b/trunk/arch/mn10300/include/asm/signal.h
index 1865d72a86ff..7e891fce2370 100644
--- a/trunk/arch/mn10300/include/asm/signal.h
+++ b/trunk/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
+#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
diff --git a/trunk/arch/mn10300/kernel/mn10300-serial.c b/trunk/arch/mn10300/kernel/mn10300-serial.c
index db509dd80565..9d49073e827a 100644
--- a/trunk/arch/mn10300/kernel/mn10300-serial.c
+++ b/trunk/arch/mn10300/kernel/mn10300-serial.c
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
- .rx_name = "ttySM0:Rx",
- .tx_name = "ttySM0:Tx",
+ .rx_name = "ttySM0/Rx",
+ .tx_name = "ttySM0/Tx",
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
- .tm_name = "ttySM0:Timer8",
+ .tm_name = "ttySM0/Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
- .tm_name = "ttySM0:Timer2",
+ .tm_name = "ttySM0/Timer2",
._tmxmd = &TM2MD,
._tmxbr = (volatile u16 *) &TM2BR,
._tmicr = &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
- .rx_name = "ttySM1:Rx",
- .tx_name = "ttySM1:Tx",
+ .rx_name = "ttySM1/Rx",
+ .tx_name = "ttySM1/Tx",
#ifdef CONFIG_MN10300_TTYSM1_TIMER9
- .tm_name = "ttySM1:Timer9",
+ .tm_name = "ttySM1/Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
- .tm_name = "ttySM1:Timer3",
+ .tm_name = "ttySM1/Timer3",
._tmxmd = &TM3MD,
._tmxbr = (volatile u16 *) &TM3BR,
._tmicr = &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2:Rx",
- .tx_name = "ttySM2:Tx",
- .tm_name = "ttySM2:Timer10",
+ .rx_name = "ttySM2/Rx",
+ .tx_name = "ttySM2/Tx",
+ .tm_name = "ttySM2/Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
._status = &SC2STR,
diff --git a/trunk/arch/mn10300/kernel/module.c b/trunk/arch/mn10300/kernel/module.c
index 196a111e2e29..6aea7fd76993 100644
--- a/trunk/arch/mn10300/kernel/module.c
+++ b/trunk/arch/mn10300/kernel/module.c
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- return 0;
+ return module_bug_finalize(hdr, sechdrs, me);
}
/*
@@ -214,4 +214,5 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
}
diff --git a/trunk/arch/mn10300/kernel/signal.c b/trunk/arch/mn10300/kernel/signal.c
index d4de05ab7864..717db14c2cc3 100644
--- a/trunk/arch/mn10300/kernel/signal.c
+++ b/trunk/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
- __get_user(mask, &act->sa_mask))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -102,9 +102,6 @@ static int restore_sigcontext(struct pt_regs *regs,
{
unsigned int err = 0;
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
if (is_using_fpu(current))
fpu_kill_state(current);
@@ -333,6 +330,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
+ set_fs(USER_DS);
+
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -346,7 +345,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
return 0;
give_sigsegv:
- force_sigsegv(sig, current);
+ force_sig(SIGSEGV, current);
return -EFAULT;
}
@@ -414,6 +413,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->d0 = sig;
regs->d1 = (long) &frame->info;
+ set_fs(USER_DS);
+
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@@ -427,16 +428,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return 0;
give_sigsegv:
- force_sigsegv(sig, current);
+ force_sig(SIGSEGV, current);
return -EFAULT;
}
-static inline void stepback(struct pt_regs *regs)
-{
- regs->pc -= 2;
- regs->orig_d0 = -1;
-}
-
/*
* handle the actual delivery of a signal to userspace
*/
@@ -464,7 +459,7 @@ static int handle_signal(int sig,
/* fallthrough */
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- stepback(regs);
+ regs->pc -= 2;
}
}
@@ -532,12 +527,12 @@ static void do_signal(struct pt_regs *regs)
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
- stepback(regs);
+ regs->pc -= 2;
break;
case -ERESTART_RESTARTBLOCK:
regs->d0 = __NR_restart_syscall;
- stepback(regs);
+ regs->pc -= 2;
break;
}
}
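The `regs->pc -= 2` fix-ups restored above rely on the mn10300 syscall instruction being two bytes long: restarting a syscall means putting the clobbered d0 argument back and rewinding the PC over that instruction so it re-executes. A minimal standalone sketch of the idea (the field names mirror the diff; the PC value and the -513/ERESTARTNOINTR constant are illustrative):

#include <stdio.h>

struct regs { unsigned long pc; long d0, orig_d0; };

static void restart_syscall(struct regs *r)
{
	r->d0 = r->orig_d0;	/* undo the -ERESTART* return value */
	r->pc -= 2;		/* back up over the 2-byte syscall insn */
}

int main(void)
{
	struct regs r = { .pc = 0x1002, .d0 = -513, .orig_d0 = 42 };

	restart_syscall(&r);
	printf("pc=%#lx d0=%ld\n", r.pc, r.d0);	/* pc=0x1000 d0=42 */
	return 0;
}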
diff --git a/trunk/arch/mn10300/mm/Makefile b/trunk/arch/mn10300/mm/Makefile
index 1557277fbc5c..28b9d983db0c 100644
--- a/trunk/arch/mn10300/mm/Makefile
+++ b/trunk/arch/mn10300/mm/Makefile
@@ -2,11 +2,13 @@
# Makefile for the MN10300-specific memory management code
#
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
-
-cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
-
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
- misalignment.o dma-alloc.o $(cacheflush-y)
+ misalignment.o dma-alloc.o
+
+ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+obj-y += cache.o cache-mn10300.o
+ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
+obj-y += cache-flush-mn10300.o
+endif
+endif
diff --git a/trunk/arch/mn10300/mm/cache.c b/trunk/arch/mn10300/mm/cache.c
index 9261217e8d2c..1b76719ec1c3 100644
--- a/trunk/arch/mn10300/mm/cache.c
+++ b/trunk/arch/mn10300/mm/cache.c
@@ -54,30 +54,13 @@ EXPORT_SYMBOL(flush_icache_page);
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, base, off;
+ unsigned long addr, size, off;
struct page *page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ppte, pte;
- if (end > 0x80000000UL) {
- /* addresses above 0xa0000000 do not go through the cache */
- if (end > 0xa0000000UL) {
- end = 0xa0000000UL;
- if (start >= end)
- return;
- }
-
- /* kernel addresses between 0x80000000 and 0x9fffffff do not
- * require page tables, so we just map such addresses directly */
- base = (start >= 0x80000000UL) ? start : 0x80000000UL;
- mn10300_dcache_flush_range(base, end);
- if (base == start)
- goto invalidate;
- end = base;
- }
-
for (; start < end; start += size) {
/* work out how much of the page to flush */
off = start & (PAGE_SIZE - 1);
@@ -121,7 +104,6 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
-invalidate:
mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);
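The block deleted above special-cased the mn10300 kernel memory map described in its own comments: 0x80000000-0x9fffffff is direct-mapped and cached, addresses from 0xa0000000 up bypass the cache entirely, and lower addresses need a page-table walk. A standalone sketch of that range-splitting, assuming the same memory map:

#include <stdio.h>

static void classify(unsigned long start, unsigned long end)
{
	if (end > 0xa0000000UL)
		end = 0xa0000000UL;	/* uncached: nothing to flush */
	if (start >= end)
		return;
	if (end > 0x80000000UL) {
		unsigned long base = start >= 0x80000000UL ? start
							   : 0x80000000UL;
		printf("direct flush  %#lx-%#lx\n", base, end);
		end = base;	/* whatever is left needs page tables */
	}
	if (start < end)
		printf("walk tables   %#lx-%#lx\n", start, end);
}

int main(void)
{
	classify(0x7fffe000UL, 0x80002000UL);	/* straddles the boundary */
	return 0;
}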
diff --git a/trunk/arch/parisc/Kconfig b/trunk/arch/parisc/Kconfig
index 79a04a9394d5..907417d187e1 100644
--- a/trunk/arch/parisc/Kconfig
+++ b/trunk/arch/parisc/Kconfig
@@ -16,7 +16,6 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
help
diff --git a/trunk/arch/parisc/include/asm/compat.h b/trunk/arch/parisc/include/asm/compat.h
index efa0b60c63fe..02b77baa5da6 100644
--- a/trunk/arch/parisc/include/asm/compat.h
+++ b/trunk/arch/parisc/include/asm/compat.h
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static __inline__ void __user *arch_compat_alloc_user_space(long len)
+static __inline__ void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
diff --git a/trunk/arch/parisc/include/asm/perf_event.h b/trunk/arch/parisc/include/asm/perf_event.h
index 1e0fd8ba6c03..cc146427d8f9 100644
--- a/trunk/arch/parisc/include/asm/perf_event.h
+++ b/trunk/arch/parisc/include/asm/perf_event.h
@@ -1,6 +1,7 @@
#ifndef __ASM_PARISC_PERF_EVENT_H
#define __ASM_PARISC_PERF_EVENT_H
-/* Empty, just to avoid compiling error */
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/trunk/arch/parisc/kernel/module.c b/trunk/arch/parisc/kernel/module.c
index 6e81bb596e5b..159a2b81e90c 100644
--- a/trunk/arch/parisc/kernel/module.c
+++ b/trunk/arch/parisc/kernel/module.c
@@ -941,10 +941,11 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
- return 0;
+ return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
+ module_bug_cleanup(mod);
}
diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig
index 4b1e521d966f..631e5a0fb6ab 100644
--- a/trunk/arch/powerpc/Kconfig
+++ b/trunk/arch/powerpc/Kconfig
@@ -138,7 +138,6 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
diff --git a/trunk/arch/powerpc/include/asm/compat.h b/trunk/arch/powerpc/include/asm/compat.h
index a11d4eac4f97..396d21a80058 100644
--- a/trunk/arch/powerpc/include/asm/compat.h
+++ b/trunk/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = current->thread.regs;
unsigned long usp = regs->gpr[1];
diff --git a/trunk/arch/powerpc/include/asm/paca.h b/trunk/arch/powerpc/include/asm/paca.h
index 9b287fdd8ea3..1ff6662f7faf 100644
--- a/trunk/arch/powerpc/include/asm/paca.h
+++ b/trunk/arch/powerpc/include/asm/paca.h
@@ -129,7 +129,7 @@ struct paca_struct {
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
- u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
+ u8 perf_event_pending; /* PM interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/trunk/arch/powerpc/include/asm/system.h b/trunk/arch/powerpc/include/asm/system.h
index 9c3d160670b4..6c294acac848 100644
--- a/trunk/arch/powerpc/include/asm/system.h
+++ b/trunk/arch/powerpc/include/asm/system.h
@@ -542,6 +542,10 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
extern struct dentry *powerpc_debugfs_root;
#endif /* __KERNEL__ */
diff --git a/trunk/arch/powerpc/kernel/module.c b/trunk/arch/powerpc/kernel/module.c
index 49cee9df225b..477c663e0140 100644
--- a/trunk/arch/powerpc/kernel/module.c
+++ b/trunk/arch/powerpc/kernel/module.c
@@ -63,6 +63,11 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
+ int err;
+
+ err = module_bug_finalize(hdr, sechdrs, me);
+ if (err)
+ return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -96,4 +101,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
}
diff --git a/trunk/arch/powerpc/kernel/perf_callchain.c b/trunk/arch/powerpc/kernel/perf_callchain.c
index d05ae4204bbf..95ad9dad298e 100644
--- a/trunk/arch/powerpc/kernel/perf_callchain.c
+++ b/trunk/arch/powerpc/kernel/perf_callchain.c
@@ -23,6 +23,18 @@
#include "ppc32.h"
#endif
+/*
+ * Store another value in a callchain_entry.
+ */
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ unsigned int nr = entry->nr;
+
+ if (nr < PERF_MAX_STACK_DEPTH) {
+ entry->ip[nr] = ip;
+ entry->nr = nr + 1;
+ }
+}
/*
* Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -46,8 +58,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+static void perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -57,7 +69,8 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, regs->nip);
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->nip);
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return;
@@ -76,7 +89,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
next_ip = regs->nip;
lr = regs->link;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
} else {
if (level == 0)
@@ -98,7 +111,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
++level;
}
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, next_ip);
if (!valid_next_sp(next_sp, sp))
return;
sp = next_sp;
@@ -220,8 +233,8 @@ static int sane_signal_64_frame(unsigned long sp)
puc == (unsigned long) &sf->uc;
}
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void perf_callchain_user_64(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -233,7 +246,8 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
for (;;) {
fp = (unsigned long __user *) sp;
@@ -262,14 +276,14 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
@@ -301,8 +315,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
return __get_user_inatomic(*ret, ptr);
}
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static inline void perf_callchain_user_64(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
}
@@ -421,8 +435,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
return mctx->mc_gregs;
}
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void perf_callchain_user_32(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned int sp, next_sp;
unsigned int next_ip;
@@ -433,7 +447,8 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned int __user *) (unsigned long) sp;
@@ -455,24 +470,45 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- perf_callchain_store(entry, next_ip);
+ callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
-void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+/*
+ * Since we can't get PMU interrupts inside a PMU interrupt handler,
+ * we don't need separate irq and nmi entries here.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
- if (current_is_64bit())
- perf_callchain_user_64(entry, regs);
- else
- perf_callchain_user_32(entry, regs);
+ struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
+
+ entry->nr = 0;
+
+ if (!user_mode(regs)) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ if (current_is_64bit())
+ perf_callchain_user_64(regs, entry);
+ else
+ perf_callchain_user_32(regs, entry);
+ }
+
+ return entry;
}
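callchain_store() above is a bounded append: entries past PERF_MAX_STACK_DEPTH are silently dropped, so callers never need a bounds check, and PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER markers separate kernel and user frames inside the same buffer. A self-contained sketch of the pattern (MAX_DEPTH is made up; the -128 marker value matches perf's PERF_CONTEXT_KERNEL but treat it as illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH 8
#define CONTEXT_KERNEL ((uint64_t)-128)

struct callchain {
	unsigned int nr;
	uint64_t ip[MAX_DEPTH];
};

static void store(struct callchain *c, uint64_t ip)
{
	if (c->nr < MAX_DEPTH)
		c->ip[c->nr++] = ip;	/* drop entries past the limit */
}

int main(void)
{
	struct callchain c = { 0 };
	unsigned int i;

	store(&c, CONTEXT_KERNEL);	/* marker: kernel frames follow */
	for (i = 0; i < 20; i++)	/* deliberately overflow */
		store(&c, 0xc0000000u + i);

	printf("kept %u of 21 entries\n", c.nr);	/* kept 8 of 21 */
	return 0;
}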
diff --git a/trunk/arch/powerpc/kernel/perf_event.c b/trunk/arch/powerpc/kernel/perf_event.c
index 3129c855933c..d301a30445e0 100644
--- a/trunk/arch/powerpc/kernel/perf_event.c
+++ b/trunk/arch/powerpc/kernel/perf_event.c
@@ -402,9 +402,6 @@ static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- if (event->hw.state & PERF_HES_STOPPED)
- return;
-
if (!event->hw.idx)
return;
/*
@@ -520,7 +517,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-static void power_pmu_disable(struct pmu *pmu)
+void hw_perf_disable(void)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -568,7 +565,7 @@ static void power_pmu_disable(struct pmu *pmu)
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-static void power_pmu_enable(struct pmu *pmu)
+void hw_perf_enable(void)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
@@ -675,8 +672,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
- if (event->hw.state & PERF_HES_STOPPED)
- val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
@@ -732,7 +727,7 @@ static int collect_events(struct perf_event *group, int max_count,
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
-static int power_pmu_add(struct perf_event *event, int ef_flags)
+static int power_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -740,7 +735,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
int ret = -EAGAIN;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
+ perf_disable();
/*
* Add the event to the list (if there is room)
@@ -754,9 +749,6 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
- if (!(ef_flags & PERF_EF_START))
- event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
-
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
@@ -777,7 +769,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
ret = 0;
out:
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
return ret;
}
@@ -785,14 +777,14 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
/*
* Remove a event from the PMU.
*/
-static void power_pmu_del(struct perf_event *event, int ef_flags)
+static void power_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
+ perf_disable();
power_pmu_read(event);
@@ -829,60 +821,34 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
}
/*
- * POWER-PMU does not support disabling individual counters, hence
- * program their cycle counter to their max value and ignore the interrupts.
+ * Re-enable interrupts on an event after they were throttled
+ * because they were coming too fast.
*/
-
-static void power_pmu_start(struct perf_event *event, int ef_flags)
-{
- unsigned long flags;
- s64 left;
-
- if (!event->hw.idx || !event->hw.sample_period)
- return;
-
- if (!(event->hw.state & PERF_HES_STOPPED))
- return;
-
- if (ef_flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-
- local_irq_save(flags);
- perf_pmu_disable(event->pmu);
-
- event->hw.state = 0;
- left = local64_read(&event->hw.period_left);
- write_pmc(event->hw.idx, left);
-
- perf_event_update_userpage(event);
- perf_pmu_enable(event->pmu);
- local_irq_restore(flags);
-}
-
-static void power_pmu_stop(struct perf_event *event, int ef_flags)
+static void power_pmu_unthrottle(struct perf_event *event)
{
+ s64 val, left;
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
-
- if (event->hw.state & PERF_HES_STOPPED)
- return;
-
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
-
+ perf_disable();
power_pmu_read(event);
- event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
- write_pmc(event->hw.idx, 0);
-
+ left = event->hw.sample_period;
+ event->hw.last_period = left;
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
}
@@ -891,11 +857,10 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-void power_pmu_start_txn(struct pmu *pmu)
+void power_pmu_start_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
- perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -905,12 +870,11 @@ void power_pmu_start_txn(struct pmu *pmu)
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-void power_pmu_cancel_txn(struct pmu *pmu)
+void power_pmu_cancel_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
- perf_pmu_enable(pmu);
}
/*
@@ -918,7 +882,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-int power_pmu_commit_txn(struct pmu *pmu)
+int power_pmu_commit_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
@@ -937,10 +901,19 @@ int power_pmu_commit_txn(struct pmu *pmu)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
- perf_pmu_enable(pmu);
return 0;
}
+struct pmu power_pmu = {
+ .enable = power_pmu_enable,
+ .disable = power_pmu_disable,
+ .read = power_pmu_read,
+ .unthrottle = power_pmu_unthrottle,
+ .start_txn = power_pmu_start_txn,
+ .cancel_txn = power_pmu_cancel_txn,
+ .commit_txn = power_pmu_commit_txn,
+};
+
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1041,7 +1014,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-static int power_pmu_event_init(struct perf_event *event)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
@@ -1053,27 +1026,25 @@ static int power_pmu_event_init(struct perf_event *event)
struct cpu_hw_events *cpuhw;
if (!ppmu)
- return -ENOENT;
-
+ return ERR_PTR(-ENXIO);
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return err;
+ return ERR_PTR(err);
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
- return -ENOENT;
+ return ERR_PTR(-EINVAL);
}
-
event->hw.config_base = ev;
event->hw.idx = 0;
@@ -1092,7 +1063,7 @@ static int power_pmu_event_init(struct perf_event *event)
* XXX we should check if the task is an idle task.
*/
flags = 0;
- if (event->attach_state & PERF_ATTACH_TASK)
+ if (event->ctx->task)
flags |= PPMU_ONLY_COUNT_RUN;
/*
@@ -1110,7 +1081,7 @@ static int power_pmu_event_init(struct perf_event *event)
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
}
@@ -1124,19 +1095,19 @@ static int power_pmu_event_init(struct perf_event *event)
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
event->hw.config = events[n];
event->hw.event_base = cflags[n];
@@ -1161,23 +1132,11 @@ static int power_pmu_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;
- return err;
+ if (err)
+ return ERR_PTR(err);
+ return &power_pmu;
}
-struct pmu power_pmu = {
- .pmu_enable = power_pmu_enable,
- .pmu_disable = power_pmu_disable,
- .event_init = power_pmu_event_init,
- .add = power_pmu_add,
- .del = power_pmu_del,
- .start = power_pmu_start,
- .stop = power_pmu_stop,
- .read = power_pmu_read,
- .start_txn = power_pmu_start_txn,
- .cancel_txn = power_pmu_cancel_txn,
- .commit_txn = power_pmu_commit_txn,
-};
-
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -1190,11 +1149,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
s64 prev, delta, left;
int record = 0;
- if (event->hw.state & PERF_HES_STOPPED) {
- write_pmc(event->hw.idx, 0);
- return;
- }
-
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -1217,11 +1171,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
val = 0x80000000LL - left;
}
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
-
/*
* Finally record data if requested.
*/
@@ -1234,9 +1183,23 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_event_overflow(event, nmi, &data, regs))
- power_pmu_stop(event, 0);
+ if (perf_event_overflow(event, nmi, &data, regs)) {
+ /*
+ * Interrupts are coming too fast - throttle them
+ * by setting the event to 0, so it will be
+ * at least 2^30 cycles until the next interrupt
+ * (assuming each event counts at most 2 counts
+ * per cycle).
+ */
+ val = 0;
+ left = ~0ULL >> 1;
+ }
}
+
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
}
/*
@@ -1379,7 +1342,6 @@ int register_power_pmu(struct power_pmu *pmu)
freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */
- perf_pmu_register(&power_pmu);
perf_cpu_notifier(power_pmu_notifier);
return 0;
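The throttling comment added to record_and_restart() checks out with simple arithmetic: the PMCs are 32-bit and raise their interrupt when the count crosses 0x80000000, so programming 0 buys 2^31 counts of silence, i.e. at least 2^30 cycles under the stated worst case of 2 counts per cycle. A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long long counts_to_irq = 1ULL << 31;	/* 0 -> 0x80000000 */
	unsigned long long counts_per_cycle = 2;	/* stated worst case */
	unsigned long long cycles = counts_to_irq / counts_per_cycle;

	printf("quiet period >= %llu cycles (2^30 = %llu)\n",
	       cycles, 1ULL << 30);
	return 0;
}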
diff --git a/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c b/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c
index 7ecca59ddf77..1ba45471ae43 100644
--- a/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/trunk/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -156,9 +156,6 @@ static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- if (event->hw.state & PERF_HES_STOPPED)
- return;
-
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
@@ -180,7 +177,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-static void fsl_emb_pmu_disable(struct pmu *pmu)
+void hw_perf_disable(void)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -219,7 +216,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-static void fsl_emb_pmu_enable(struct pmu *pmu)
+void hw_perf_enable(void)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -265,8 +262,8 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-/* context locked on entry */
-static int fsl_emb_pmu_add(struct perf_event *event, int flags)
+/* perf must be disabled, context locked on entry */
+static int fsl_emb_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
@@ -274,7 +271,6 @@ static int fsl_emb_pmu_add(struct perf_event *event, int flags)
u64 val;
int i;
- perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -305,12 +301,6 @@ static int fsl_emb_pmu_add(struct perf_event *event, int flags)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
-
- if (!(flags & PERF_EF_START)) {
- event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- val = 0;
- }
-
write_pmc(i, val);
perf_event_update_userpage(event);
@@ -320,17 +310,15 @@ static int fsl_emb_pmu_add(struct perf_event *event, int flags)
ret = 0;
out:
put_cpu_var(cpu_hw_events);
- perf_pmu_enable(event->pmu);
return ret;
}
-/* context locked on entry */
-static void fsl_emb_pmu_del(struct perf_event *event, int flags)
+/* perf must be disabled, context locked on entry */
+static void fsl_emb_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
- perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
@@ -358,58 +346,45 @@ static void fsl_emb_pmu_del(struct perf_event *event, int flags)
cpuhw->n_events--;
out:
- perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
-static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
-{
- unsigned long flags;
- s64 left;
-
- if (event->hw.idx < 0 || !event->hw.sample_period)
- return;
-
- if (!(event->hw.state & PERF_HES_STOPPED))
- return;
-
- if (ef_flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-
- local_irq_save(flags);
- perf_pmu_disable(event->pmu);
-
- event->hw.state = 0;
- left = local64_read(&event->hw.period_left);
- write_pmc(event->hw.idx, left);
-
- perf_event_update_userpage(event);
- perf_pmu_enable(event->pmu);
- local_irq_restore(flags);
-}
-
-static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+/*
+ * Re-enable interrupts on an event after they were throttled
+ * because they were coming too fast.
+ *
+ * Context is locked on entry, but perf is not disabled.
+ */
+static void fsl_emb_pmu_unthrottle(struct perf_event *event)
{
+ s64 val, left;
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
-
- if (event->hw.state & PERF_HES_STOPPED)
- return;
-
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
-
+ perf_disable();
fsl_emb_pmu_read(event);
- event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
- write_pmc(event->hw.idx, 0);
-
+ left = event->hw.sample_period;
+ event->hw.last_period = left;
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
}
+static struct pmu fsl_emb_pmu = {
+ .enable = fsl_emb_pmu_enable,
+ .disable = fsl_emb_pmu_disable,
+ .read = fsl_emb_pmu_read,
+ .unthrottle = fsl_emb_pmu_unthrottle,
+};
+
/*
* Release the PMU if this is the last perf_event.
*/
@@ -453,7 +428,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-static int fsl_emb_pmu_event_init(struct perf_event *event)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
@@ -466,14 +441,14 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return err;
+ return ERR_PTR(err);
break;
case PERF_TYPE_RAW:
@@ -481,12 +456,12 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
break;
default:
- return -ENOENT;
+ return ERR_PTR(-EINVAL);
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/*
* If this is in a group, check if it can go on with all the
@@ -498,7 +473,7 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -509,7 +484,7 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
}
if (num_restricted >= ppmu->n_restricted)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
event->hw.idx = -1;
@@ -522,7 +497,7 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
- return -ENOTSUPP;
+ return ERR_PTR(-ENOTSUPP);
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
@@ -548,20 +523,11 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;
- return err;
+ if (err)
+ return ERR_PTR(err);
+ return &fsl_emb_pmu;
}
-static struct pmu fsl_emb_pmu = {
- .pmu_enable = fsl_emb_pmu_enable,
- .pmu_disable = fsl_emb_pmu_disable,
- .event_init = fsl_emb_pmu_event_init,
- .add = fsl_emb_pmu_add,
- .del = fsl_emb_pmu_del,
- .start = fsl_emb_pmu_start,
- .stop = fsl_emb_pmu_stop,
- .read = fsl_emb_pmu_read,
-};
-
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -574,11 +540,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
s64 prev, delta, left;
int record = 0;
- if (event->hw.state & PERF_HES_STOPPED) {
- write_pmc(event->hw.idx, 0);
- return;
- }
-
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -601,11 +562,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
val = 0x80000000LL - left;
}
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
-
/*
* Finally record data if requested.
*/
@@ -615,9 +571,23 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- if (perf_event_overflow(event, nmi, &data, regs))
- fsl_emb_pmu_stop(event, 0);
+ if (perf_event_overflow(event, nmi, &data, regs)) {
+ /*
+ * Interrupts are coming too fast - throttle them
+ * by setting the event to 0, so it will be
+ * at least 2^30 cycles until the next interrupt
+ * (assuming each event counts at most 2 counts
+ * per cycle).
+ */
+ val = 0;
+ left = ~0ULL >> 1;
+ }
}
+
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
}
static void perf_event_interrupt(struct pt_regs *regs)
@@ -681,7 +651,5 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
- perf_pmu_register(&fsl_emb_pmu);
-
return 0;
}
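Both PMU back-ends program a sampling period the same way: `val = 0x80000000L - left` preloads the 32-bit counter so the overflow interrupt fires after `left` events, with 0 (maximum headroom) as the fallback for periods that don't fit below 0x80000000. A sketch of just that computation (pmc_preload() is my name, not the kernel's):

#include <stdio.h>

static unsigned int pmc_preload(long long period)
{
	if (period > 0 && period < 0x80000000LL)
		return (unsigned int)(0x80000000LL - period);
	return 0;	/* huge/invalid period: maximum headroom */
}

int main(void)
{
	printf("preload for period 1000: 0x%08x\n", pmc_preload(1000));
	/* prints 0x7ffffc18: 1000 increments later the PMC hits 0x80000000 */
	return 0;
}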
diff --git a/trunk/arch/powerpc/kernel/signal.c b/trunk/arch/powerpc/kernel/signal.c
index 2300426e531a..7109f5b1baa8 100644
--- a/trunk/arch/powerpc/kernel/signal.c
+++ b/trunk/arch/powerpc/kernel/signal.c
@@ -138,7 +138,6 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
- regs->trap = 0;
return 0; /* no signals delivered */
}
@@ -165,7 +164,6 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
}
- regs->trap = 0;
if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c
index b96a3a010c26..266610119f66 100644
--- a/trunk/arch/powerpc/kernel/signal_32.c
+++ b/trunk/arch/powerpc/kernel/signal_32.c
@@ -511,7 +511,6 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
- regs->trap = 0;
err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
@@ -885,6 +884,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
+ regs->trap = 0;
return 1;
badframe:
@@ -1228,6 +1228,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
+ regs->trap = 0;
return 1;
diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c
index 27c4a4584f80..2fe6fc64b614 100644
--- a/trunk/arch/powerpc/kernel/signal_64.c
+++ b/trunk/arch/powerpc/kernel/signal_64.c
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
/* skip SOFTE */
- regs->trap = 0;
+ err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c
index 54888eb10c3b..8533b3b83f5d 100644
--- a/trunk/arch/powerpc/kernel/time.c
+++ b/trunk/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
#include
#include
#include
-#include
+#include
#include
#include
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_IRQ_WORK
+#ifdef CONFIG_PERF_EVENTS
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
+static inline unsigned long test_perf_event_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
- : "i" (offsetof(struct paca_struct, irq_work_pending)));
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
return x;
}
-static inline void set_irq_work_pending_flag(void)
+static inline void set_perf_event_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
- "i" (offsetof(struct paca_struct, irq_work_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
-static inline void clear_irq_work_pending(void)
+static inline void clear_perf_event_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
- "i" (offsetof(struct paca_struct, irq_work_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
#else /* 32-bit */
-DEFINE_PER_CPU(u8, irq_work_pending);
+DEFINE_PER_CPU(u8, perf_event_pending);
-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
+#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
+#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
#endif /* 32 vs 64 bit */
-void set_irq_work_pending(void)
+void set_perf_event_pending(void)
{
preempt_disable();
- set_irq_work_pending_flag();
+ set_perf_event_pending_flag();
set_dec(1);
preempt_enable();
}
-#else /* CONFIG_IRQ_WORK */
+#else /* CONFIG_PERF_EVENTS */
-#define test_irq_work_pending() 0
-#define clear_irq_work_pending()
+#define test_perf_event_pending() 0
+#define clear_perf_event_pending()
-#endif /* CONFIG_IRQ_WORK */
+#endif /* CONFIG_PERF_EVENTS */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
- if (test_irq_work_pending()) {
- clear_irq_work_pending();
- irq_work_run();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
}
#ifdef CONFIG_PPC_ISERIES
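set_perf_event_pending() above is a flag-and-poke pattern: work that cannot run in the current context sets a per-CPU flag and arms the decrementer with set_dec(1), and timer_interrupt() later tests and clears the flag before doing the deferred work. A minimal userspace model (all names here are mine; `pending` stands in for the PACA byte or the per-cpu variable):

#include <stdbool.h>
#include <stdio.h>

static volatile bool pending;	/* stands in for the per-CPU flag */

static void set_dec(int ticks)	/* stub: would arm the decrementer */
{
	(void)ticks;
}

static void raise_pending_work(void)
{
	pending = true;		/* mark work as pending... */
	set_dec(1);		/* ...and make the timer fire at once */
}

static void timer_interrupt(void)
{
	if (pending) {
		pending = false;
		puts("running deferred perf work");
	}
}

int main(void)
{
	raise_pending_work();
	timer_interrupt();	/* simulate the decrementer firing */
	return 0;
}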
diff --git a/trunk/arch/powerpc/platforms/512x/clock.c b/trunk/arch/powerpc/platforms/512x/clock.c
index 3dc2a8d262b8..5b243bd3eb3b 100644
--- a/trunk/arch/powerpc/platforms/512x/clock.c
+++ b/trunk/arch/powerpc/platforms/512x/clock.c
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
int id_match = 0;
if (dev == NULL || id == NULL)
- return clk;
+ return NULL;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
diff --git a/trunk/arch/powerpc/platforms/52xx/efika.c b/trunk/arch/powerpc/platforms/52xx/efika.c
index 18c104820198..45c0cb9b67e6 100644
--- a/trunk/arch/powerpc/platforms/52xx/efika.c
+++ b/trunk/arch/powerpc/platforms/52xx/efika.c
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %s\n", pcictrl->full_name);
- goto out_put;
+ return;
}
if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
printk(" controlled by %s\n", pcictrl->full_name);
printk("\n");
- hose = pcibios_alloc_controller(pcictrl);
+ hose = pcibios_alloc_controller(of_node_get(pcictrl));
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %s\n",
pcictrl->full_name);
- goto out_put;
+ return;
}
hose->first_busno = bus_range[0];
@@ -124,9 +124,6 @@ static void __init efika_pcisetup(void)
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
- return;
-out_put:
- of_node_put(pcictrl);
}
#else
diff --git a/trunk/arch/powerpc/platforms/52xx/mpc52xx_common.c b/trunk/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 41f3a7eda1de..6e905314ad5d 100644
--- a/trunk/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/trunk/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -325,16 +325,12 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
- /* wait for 1 us */
- udelay(1);
+ /* wait at least 1 us */
+ udelay(2);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
- /* wait at least 200ns */
- /* 7 ~= (200ns * timebase) / ns2sec */
- __delay(7);
-
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);
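The deleted `__delay(7)` encoded 200 ns in timebase ticks, per its own comment `7 ~= (200ns * timebase) / ns2sec`. A quick check under an assumed 33 MHz timebase (the real value is board-specific):

#include <stdio.h>

int main(void)
{
	double timebase_hz = 33000000.0;	/* assumption, see above */
	double delay_ns = 200.0;

	printf("ticks = %.1f -> round up to 7\n",
	       delay_ns * 1e-9 * timebase_hz);	/* prints 6.6 */
	return 0;
}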
diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig
index 75976a141947..f0777a47e3a5 100644
--- a/trunk/arch/s390/Kconfig
+++ b/trunk/arch/s390/Kconfig
@@ -95,7 +95,6 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
@@ -199,13 +198,6 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
-config SCHED_BOOK
- bool "Book scheduler support"
- depends on SMP
- help
- Book scheduler support improves the CPU scheduler's decision making
- when dealing with machines that have several books.
-
config MATHEMU
bool "IEEE FPU emulation"
depends on MARCH_G5
diff --git a/trunk/arch/s390/include/asm/compat.h b/trunk/arch/s390/include/asm/compat.h
index a875c2f542e1..104f2007f097 100644
--- a/trunk/arch/s390/include/asm/compat.h
+++ b/trunk/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
#endif
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
unsigned long stack;
diff --git a/trunk/arch/s390/include/asm/hardirq.h b/trunk/arch/s390/include/asm/hardirq.h
index 881d94590aeb..498bc3892385 100644
--- a/trunk/arch/s390/include/asm/hardirq.h
+++ b/trunk/arch/s390/include/asm/hardirq.h
@@ -12,6 +12,10 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
+#include
+#include
+#include
+#include
#include
#define local_softirq_pending() (S390_lowcore.softirq_pending)
diff --git a/trunk/arch/s390/include/asm/perf_event.h b/trunk/arch/s390/include/asm/perf_event.h
index a75f168d2718..3840cbe77637 100644
--- a/trunk/arch/s390/include/asm/perf_event.h
+++ b/trunk/arch/s390/include/asm/perf_event.h
@@ -4,6 +4,7 @@
* Copyright 2009 Martin Schwidefsky, IBM Corporation.
*/
-/* Empty, just to avoid compiling error */
+static inline void set_perf_event_pending(void) {}
+static inline void clear_perf_event_pending(void) {}
#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/trunk/arch/s390/include/asm/system.h b/trunk/arch/s390/include/asm/system.h
index 38ddd8a9a9e8..cef66210c846 100644
--- a/trunk/arch/s390/include/asm/system.h
+++ b/trunk/arch/s390/include/asm/system.h
@@ -97,6 +97,7 @@ static inline void restore_access_regs(unsigned int *acrs)
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
+extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
diff --git a/trunk/arch/s390/include/asm/topology.h b/trunk/arch/s390/include/asm/topology.h
index 051107a2c5e2..831bd033ea77 100644
--- a/trunk/arch/s390/include/asm/topology.h
+++ b/trunk/arch/s390/include/asm/topology.h
@@ -3,32 +3,15 @@
#include
+#define mc_capable() (1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
-static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
- return &cpu_core_map[cpu];
-}
-
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define mc_capable() (1)
-
-#ifdef CONFIG_SCHED_BOOK
-
-extern unsigned char cpu_book_id[NR_CPUS];
-extern cpumask_t cpu_book_map[NR_CPUS];
-
-static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
-{
- return &cpu_book_map[cpu];
-}
-
-#define topology_book_id(cpu) (cpu_book_id[cpu])
-#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
-
-#endif /* CONFIG_SCHED_BOOK */
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -47,8 +30,6 @@ static inline void s390_init_cpu_topology(void)
};
#endif
-#define SD_BOOK_INIT SD_CPU_INIT
-
#include
#endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/trunk/arch/s390/kernel/module.c b/trunk/arch/s390/kernel/module.c
index f7167ee4604c..22cfd634c355 100644
--- a/trunk/arch/s390/kernel/module.c
+++ b/trunk/arch/s390/kernel/module.c
@@ -407,9 +407,10 @@ int module_finalize(const Elf_Ehdr *hdr,
{
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
- return 0;
+ return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
}
diff --git a/trunk/arch/s390/kernel/topology.c b/trunk/arch/s390/kernel/topology.c
index 13559c993847..bcef00766a64 100644
--- a/trunk/arch/s390/kernel/topology.c
+++ b/trunk/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
union tl_entry tle[0];
};
-struct mask_info {
- struct mask_info *next;
+struct core_info {
+ struct core_info *next;
unsigned char id;
cpumask_t mask;
};
@@ -66,6 +66,7 @@ struct mask_info {
static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
+static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
@@ -73,37 +74,38 @@ static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
-static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
-#ifdef CONFIG_SCHED_BOOK
-static struct mask_info book_info;
-cpumask_t cpu_book_map[NR_CPUS];
-unsigned char cpu_book_id[NR_CPUS];
-#endif
-
-static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
+ struct core_info *core = &core_info;
+ unsigned long flags;
cpumask_t mask;
cpus_clear(mask);
if (!topology_enabled || !machine_has_topology)
return cpu_possible_map;
- while (info) {
- if (cpu_isset(cpu, info->mask)) {
- mask = info->mask;
+ spin_lock_irqsave(&topology_lock, flags);
+ while (core) {
+ if (cpu_isset(cpu, core->mask)) {
+ mask = core->mask;
break;
}
- info = info->next;
+ core = core->next;
}
+ spin_unlock_irqrestore(&topology_lock, flags);
if (cpus_empty(mask))
mask = cpumask_of_cpu(cpu);
return mask;
}
-static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
- struct mask_info *core)
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
+static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
unsigned int cpu;
@@ -115,35 +117,23 @@ static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
- if (cpu_logical_map(lcpu) != rcpu)
- continue;
-#ifdef CONFIG_SCHED_BOOK
- cpu_set(lcpu, book->mask);
- cpu_book_id[lcpu] = book->id;
-#endif
- cpu_set(lcpu, core->mask);
- cpu_core_id[lcpu] = core->id;
- smp_cpu_polarization[lcpu] = tl_cpu->pp;
+ if (cpu_logical_map(lcpu) == rcpu) {
+ cpu_set(lcpu, core->mask);
+ cpu_core_id[lcpu] = core->id;
+ smp_cpu_polarization[lcpu] = tl_cpu->pp;
+ }
}
}
}
-static void clear_masks(void)
+static void clear_cores(void)
{
- struct mask_info *info;
+ struct core_info *core = &core_info;
- info = &core_info;
- while (info) {
- cpus_clear(info->mask);
- info = info->next;
- }
-#ifdef CONFIG_SCHED_BOOK
- info = &book_info;
- while (info) {
- cpus_clear(info->mask);
- info = info->next;
+ while (core) {
+ cpus_clear(core->mask);
+ core = core->next;
}
-#endif
}
static union tl_entry *next_tle(union tl_entry *tle)
@@ -156,36 +146,29 @@ static union tl_entry *next_tle(union tl_entry *tle)
static void tl_to_cores(struct tl_info *info)
{
-#ifdef CONFIG_SCHED_BOOK
- struct mask_info *book = &book_info;
-#else
- struct mask_info *book = NULL;
-#endif
- struct mask_info *core = &core_info;
union tl_entry *tle, *end;
-
+ struct core_info *core = &core_info;
spin_lock_irq(&topology_lock);
- clear_masks();
+ clear_cores();
tle = info->tle;
end = (union tl_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
+ case 5:
+ case 4:
+ case 3:
case 2:
- book = book->next;
- book->id = tle->container.id;
break;
-#endif
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, core);
+ add_cpus_to_core(&tle->cpu, core);
break;
default:
- clear_masks();
+ clear_cores();
machine_has_topology = 0;
goto out;
}
@@ -238,29 +221,10 @@ int topology_set_cpu_management(int fc)
static void update_cpu_core_map(void)
{
- unsigned long flags;
int cpu;
- spin_lock_irqsave(&topology_lock, flags);
- for_each_possible_cpu(cpu) {
- cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
- cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
- }
- spin_unlock_irqrestore(&topology_lock, flags);
-}
-
-static void store_topology(struct tl_info *info)
-{
-#ifdef CONFIG_SCHED_BOOK
- int rc;
-
- rc = stsi(info, 15, 1, 3);
- if (rc != -ENOSYS)
- return;
-#endif
- stsi(info, 15, 1, 2);
+ for_each_possible_cpu(cpu)
+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
int arch_update_cpu_topology(void)
@@ -274,7 +238,7 @@ int arch_update_cpu_topology(void)
topology_update_polarization_simple();
return 0;
}
- store_topology(info);
+ stsi(info, 15, 1, 2);
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
@@ -335,24 +299,12 @@ static int __init init_topology_update(void)
}
__initcall(init_topology_update);
-static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
-{
- int i, nr_masks;
-
- nr_masks = info->mag[NR_MAG - offset];
- for (i = 0; i < info->mnest - offset; i++)
- nr_masks *= info->mag[NR_MAG - offset - 1 - i];
- nr_masks = max(nr_masks, 1);
- for (i = 0; i < nr_masks; i++) {
- mask->next = alloc_bootmem(sizeof(struct mask_info));
- mask = mask->next;
- }
-}
-
void __init s390_init_cpu_topology(void)
{
unsigned long long facility_bits;
struct tl_info *info;
+ struct core_info *core;
+ int nr_cores;
int i;
if (stfle(&facility_bits, 1) <= 0)
@@ -363,13 +315,25 @@ void __init s390_init_cpu_topology(void)
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
- store_topology(info);
+ stsi(info, 15, 1, 2);
+
+ nr_cores = info->mag[NR_MAG - 2];
+ for (i = 0; i < info->mnest - 2; i++)
+ nr_cores *= info->mag[NR_MAG - 3 - i];
+
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
- alloc_masks(info, &core_info, 2);
-#ifdef CONFIG_SCHED_BOOK
- alloc_masks(info, &book_info, 3);
-#endif
+
+ core = &core_info;
+ for (i = 0; i < nr_cores; i++) {
+ core->next = alloc_bootmem(sizeof(struct core_info));
+ core = core->next;
+ if (!core)
+ goto error;
+ }
+ return;
+error:
+ machine_has_topology = 0;
}
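The nr_cores computation restored above multiplies the topology magnitudes between the core level and the top of the nested hierarchy, giving an upper bound on how many core containers to allocate at boot. A worked example with invented magnitudes (real values come from the STSI 15.1.2 block):

#include <stdio.h>

#define NR_MAG 6

int main(void)
{
	/* mag[i] = container count at one nesting level, top levels first */
	int mag[NR_MAG] = { 0, 0, 0, 4, 8, 16 };	/* sample data */
	int mnest = 3;		/* nesting levels actually in use */
	int nr_cores, i;

	nr_cores = mag[NR_MAG - 2];		/* 8 core containers... */
	for (i = 0; i < mnest - 2; i++)
		nr_cores *= mag[NR_MAG - 3 - i];	/* ...in each of 4 books */

	printf("upper bound on core containers: %d\n", nr_cores);	/* 32 */
	return 0;
}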
diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig
index 35b6879628a0..33990fa95af0 100644
--- a/trunk/arch/sh/Kconfig
+++ b/trunk/arch/sh/Kconfig
@@ -16,7 +16,6 @@ config SUPERH
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_KERNEL_GZIP
@@ -250,11 +249,6 @@ config ARCH_SHMOBILE
select PM
select PM_RUNTIME
-config CPU_HAS_PMU
- depends on CPU_SH4 || CPU_SH4A
- default y
- bool
-
if SUPERH32
choice
@@ -744,14 +738,6 @@ config GUSA_RB
LLSC, this should be more efficient than the other alternative of
disabling interrupts around the atomic sequence.
-config HW_PERF_EVENTS
- bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && CPU_HAS_PMU
- default y
- help
- Enable hardware performance counter support for perf events. If
- disabled, perf events will use software events only.
-
source "drivers/sh/Kconfig"
endmenu
diff --git a/trunk/arch/sh/include/asm/perf_event.h b/trunk/arch/sh/include/asm/perf_event.h
index 14308bed7ea5..3d0c9f36d150 100644
--- a/trunk/arch/sh/include/asm/perf_event.h
+++ b/trunk/arch/sh/include/asm/perf_event.h
@@ -26,4 +26,11 @@ extern int register_sh_pmu(struct sh_pmu *);
extern int reserve_pmc_hardware(void);
extern void release_pmc_hardware(void);
+static inline void set_perf_event_pending(void)
+{
+ /* Nothing to see here, move along. */
+}
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
#endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/trunk/arch/sh/kernel/module.c b/trunk/arch/sh/kernel/module.c
index ae0be697a89e..43adddfe4c04 100644
--- a/trunk/arch/sh/kernel/module.c
+++ b/trunk/arch/sh/kernel/module.c
@@ -149,11 +149,13 @@ int module_finalize(const Elf_Ehdr *hdr,
int ret = 0;
ret |= module_dwarf_finalize(hdr, sechdrs, me);
+ ret |= module_bug_finalize(hdr, sechdrs, me);
return ret;
}
void module_arch_cleanup(struct module *mod)
{
+ module_bug_cleanup(mod);
module_dwarf_cleanup(mod);
}
diff --git a/trunk/arch/sh/kernel/perf_callchain.c b/trunk/arch/sh/kernel/perf_callchain.c
index d5ca1ef50fa9..a9dd3abde28e 100644
--- a/trunk/arch/sh/kernel/perf_callchain.c
+++ b/trunk/arch/sh/kernel/perf_callchain.c
@@ -14,6 +14,11 @@
#include
#include
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
static void callchain_warning(void *data, char *msg)
{
@@ -34,7 +39,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
struct perf_callchain_entry *entry = data;
if (reliable)
- perf_callchain_store(entry, addr);
+ callchain_store(entry, addr);
}
static const struct stacktrace_ops callchain_ops = {
@@ -44,10 +49,47 @@ static const struct stacktrace_ops callchain_ops = {
.address = callchain_address,
};
-void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+static void
+perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
- perf_callchain_store(entry, regs->pc);
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->pc);
unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}
+
+static void
+perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ /*
+ * Only the kernel side is implemented for now.
+ */
+ if (!is_user)
+ perf_callchain_kernel(regs, entry);
+}
+
+/*
+ * No need for separate IRQ and NMI entries.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+ entry->nr = 0;
+
+ perf_do_callchain(regs, entry);
+
+ return entry;
+}
diff --git a/trunk/arch/sh/kernel/perf_event.c b/trunk/arch/sh/kernel/perf_event.c
index 5a4b33435650..7a3dc3567258 100644
--- a/trunk/arch/sh/kernel/perf_event.c
+++ b/trunk/arch/sh/kernel/perf_event.c
@@ -59,24 +59,6 @@ static inline int sh_pmu_initialized(void)
return !!sh_pmu;
}
-const char *perf_pmu_name(void)
-{
- if (!sh_pmu)
- return NULL;
-
- return sh_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- if (!sh_pmu)
- return 0;
-
- return sh_pmu->num_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
/*
* Release the PMU if this is the last perf_event.
*/
@@ -224,80 +206,50 @@ static void sh_perf_event_update(struct perf_event *event,
local64_add(delta, &event->count);
}
-static void sh_pmu_stop(struct perf_event *event, int flags)
+static void sh_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (!(event->hw.state & PERF_HES_STOPPED)) {
- sh_pmu->disable(hwc, idx);
- cpuc->events[idx] = NULL;
- event->hw.state |= PERF_HES_STOPPED;
- }
-
- if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
- sh_perf_event_update(event, &event->hw, idx);
- event->hw.state |= PERF_HES_UPTODATE;
- }
-}
-
-static void sh_pmu_start(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (flags & PERF_EF_RELOAD)
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+ clear_bit(idx, cpuc->active_mask);
+ sh_pmu->disable(hwc, idx);
- cpuc->events[idx] = event;
- event->hw.state = 0;
- sh_pmu->enable(hwc, idx);
-}
+ barrier();
-static void sh_pmu_del(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ sh_perf_event_update(event, &event->hw, idx);
- sh_pmu_stop(event, PERF_EF_UPDATE);
- __clear_bit(event->hw.idx, cpuc->used_mask);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
-static int sh_pmu_add(struct perf_event *event, int flags)
+static int sh_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- int ret = -EAGAIN;
-
- perf_pmu_disable(event->pmu);
- if (__test_and_set_bit(idx, cpuc->used_mask)) {
+ if (test_and_set_bit(idx, cpuc->used_mask)) {
idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
if (idx == sh_pmu->num_events)
- goto out;
+ return -EAGAIN;
- __set_bit(idx, cpuc->used_mask);
+ set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
sh_pmu->disable(hwc, idx);
- event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (flags & PERF_EF_START)
- sh_pmu_start(event, PERF_EF_RELOAD);
+ cpuc->events[idx] = event;
+ set_bit(idx, cpuc->active_mask);
+
+ sh_pmu->enable(hwc, idx);
perf_event_update_userpage(event);
- ret = 0;
-out:
- perf_pmu_enable(event->pmu);
- return ret;
+
+ return 0;
}
static void sh_pmu_read(struct perf_event *event)
@@ -305,56 +257,24 @@ static void sh_pmu_read(struct perf_event *event)
sh_perf_event_update(event, &event->hw, event->hw.idx);
}
-static int sh_pmu_event_init(struct perf_event *event)
-{
- int err;
-
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HW_CACHE:
- case PERF_TYPE_HARDWARE:
- err = __hw_perf_event_init(event);
- break;
-
- default:
- return -ENOENT;
- }
+static const struct pmu pmu = {
+ .enable = sh_pmu_enable,
+ .disable = sh_pmu_disable,
+ .read = sh_pmu_read,
+};
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = __hw_perf_event_init(event);
if (unlikely(err)) {
if (event->destroy)
event->destroy(event);
+ return ERR_PTR(err);
}
- return err;
-}
-
-static void sh_pmu_enable(struct pmu *pmu)
-{
- if (!sh_pmu_initialized())
- return;
-
- sh_pmu->enable_all();
-}
-
-static void sh_pmu_disable(struct pmu *pmu)
-{
- if (!sh_pmu_initialized())
- return;
-
- sh_pmu->disable_all();
+ return &pmu;
}
-static struct pmu pmu = {
- .pmu_enable = sh_pmu_enable,
- .pmu_disable = sh_pmu_disable,
- .event_init = sh_pmu_event_init,
- .add = sh_pmu_add,
- .del = sh_pmu_del,
- .start = sh_pmu_start,
- .stop = sh_pmu_stop,
- .read = sh_pmu_read,
-};
-
static void sh_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -379,17 +299,32 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
+void hw_perf_enable(void)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->enable_all();
+}
+
+void hw_perf_disable(void)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->disable_all();
+}
+
+int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
{
if (sh_pmu)
return -EBUSY;
- sh_pmu = _pmu;
+ sh_pmu = pmu;
- pr_info("Performance Events: %s support registered\n", _pmu->name);
+ pr_info("Performance Events: %s support registered\n", pmu->name);
- WARN_ON(_pmu->num_events > MAX_HWEVENTS);
+ WARN_ON(pmu->num_events > MAX_HWEVENTS);
- perf_pmu_register(&pmu);
perf_cpu_notifier(sh_pmu_notifier);
return 0;
}
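
Both arch backends in this revert hand events to the core through hw_perf_event_init(), which returns either a valid `const struct pmu *` or an errno encoded in the pointer via ERR_PTR(). A self-contained sketch of that pointer-encoding convention; the real macros live in include/linux/err.h, and MAX_ERRNO below mirrors the kernel's 4095 threshold:

```c
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* The top MAX_ERRNO addresses are reserved for error codes. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct pmu { const char *name; };
static const struct pmu pmu = { "demo" };

/* Returns &pmu on success, ERR_PTR(-errno) on failure. */
static const struct pmu *hw_perf_event_init(int want_fail)
{
	if (want_fail)
		return ERR_PTR(-22 /* -EINVAL */);
	return &pmu;
}

int main(void)
{
	const struct pmu *p = hw_perf_event_init(1);

	if (IS_ERR(p))
		printf("init failed: %ld\n", PTR_ERR(p));
	else
		printf("using pmu %s\n", p->name);
	return 0;
}
```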
diff --git a/trunk/arch/sh/oprofile/Makefile b/trunk/arch/sh/oprofile/Makefile
index e85aae73e3dc..4886c5c1786c 100644
--- a/trunk/arch/sh/oprofile/Makefile
+++ b/trunk/arch/sh/oprofile/Makefile
@@ -6,8 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-ifeq ($(CONFIG_HW_PERF_EVENTS),y)
-DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
-endif
-
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
diff --git a/trunk/arch/sh/oprofile/common.c b/trunk/arch/sh/oprofile/common.c
index e10d89376f9b..ac604937f3ee 100644
--- a/trunk/arch/sh/oprofile/common.c
+++ b/trunk/arch/sh/oprofile/common.c
@@ -17,45 +17,114 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/smp.h>
-#include <linux/perf_event.h>
#include <asm/processor.h>
+#include "op_impl.h"
+
+static struct op_sh_model *model;
+
+static struct op_counter_config ctr[20];
-#ifdef CONFIG_HW_PERF_EVENTS
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-char *op_name_from_perf_id(void)
+static int op_sh_setup(void)
+{
+ /* Pre-compute the values to stuff in the hardware registers. */
+ model->reg_setup(ctr);
+
+ /* Configure the registers on all cpus. */
+ on_each_cpu(model->cpu_setup, NULL, 1);
+
+ return 0;
+}
+
+static int op_sh_create_files(struct super_block *sb, struct dentry *root)
{
- const char *pmu;
- char buf[20];
- int size;
+ int i, ret = 0;
- pmu = perf_pmu_name();
- if (!pmu)
- return NULL;
+ for (i = 0; i < model->num_counters; i++) {
+ struct dentry *dir;
+ char buf[4];
- size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
- if (size > -1 && size < sizeof(buf))
- return buf;
+ snprintf(buf, sizeof(buf), "%d", i);
+ dir = oprofilefs_mkdir(sb, root, buf);
- return NULL;
+ ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
+ ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
+ ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
+ ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+
+ if (model->create_files)
+ ret |= model->create_files(sb, dir);
+ else
+ ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+
+ /* Dummy entries */
+ ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ }
+
+ return ret;
}
-int __init oprofile_arch_init(struct oprofile_operations *ops)
+static int op_sh_start(void)
{
- ops->backtrace = sh_backtrace;
+ /* Enable performance monitoring for all counters. */
+ on_each_cpu(model->cpu_start, NULL, 1);
- return oprofile_perf_init(ops);
+ return 0;
}
-void __exit oprofile_arch_exit(void)
+static void op_sh_stop(void)
{
- oprofile_perf_exit();
+ /* Disable performance monitoring for all counters. */
+ on_each_cpu(model->cpu_stop, NULL, 1);
}
-#else
+
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
+ struct op_sh_model *lmodel = NULL;
+ int ret;
+
+ /*
+ * Always assign the backtrace op. If the counter initialization
+ * fails, we fall back to the timer which will still make use of
+ * this.
+ */
+ ops->backtrace = sh_backtrace;
+
+ /*
+ * XXX
+ *
+ * All of the SH7750/SH-4A counters have been converted to perf,
+ * this infrastructure hook is left for other users until they've
+ * had a chance to convert over, at which point all of this
+ * will be deleted.
+ */
+
+ if (!lmodel)
+ return -ENODEV;
+ if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
+ return -ENODEV;
+
+ ret = lmodel->init();
+ if (unlikely(ret != 0))
+ return ret;
+
+ model = lmodel;
+
+ ops->setup = op_sh_setup;
+ ops->create_files = op_sh_create_files;
+ ops->start = op_sh_start;
+ ops->stop = op_sh_stop;
+ ops->cpu_type = lmodel->cpu_type;
+
+ printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
+ lmodel->cpu_type);
+
+ return 0;
+}
+
+void oprofile_arch_exit(void)
+{
+ if (model && model->exit)
+ model->exit();
}
-void __exit oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/trunk/arch/sh/oprofile/op_impl.h b/trunk/arch/sh/oprofile/op_impl.h
new file mode 100644
index 000000000000..1244479ceb29
--- /dev/null
+++ b/trunk/arch/sh/oprofile/op_impl.h
@@ -0,0 +1,33 @@
+#ifndef __OP_IMPL_H
+#define __OP_IMPL_H
+
+/* Per-counter configuration as set via oprofilefs. */
+struct op_counter_config {
+ unsigned long enabled;
+ unsigned long event;
+
+ unsigned long count;
+
+ /* Dummy values for userspace tool compliance */
+ unsigned long kernel;
+ unsigned long user;
+ unsigned long unit_mask;
+};
+
+/* Per-architecture configury and hooks. */
+struct op_sh_model {
+ void (*reg_setup)(struct op_counter_config *);
+ int (*create_files)(struct super_block *sb, struct dentry *dir);
+ void (*cpu_setup)(void *dummy);
+ int (*init)(void);
+ void (*exit)(void);
+ void (*cpu_start)(void *args);
+ void (*cpu_stop)(void *args);
+ char *cpu_type;
+ unsigned char num_counters;
+};
+
+/* arch/sh/oprofile/common.c */
+extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif /* __OP_IMPL_H */
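
common.c drives these hooks generically: reg_setup() precomputes control values once, then cpu_setup()/cpu_start()/cpu_stop() are broadcast with on_each_cpu(). A hypothetical, compile-only sketch of a backend filling in the table; every demo_* name is invented for illustration, struct op_counter_config is trimmed to a subset, and the SPR accesses are stubbed out:

```c
/* Compile-only sketch; kernel headers and real SPR accesses are omitted. */
struct op_counter_config { unsigned long enabled, event, count; };

struct op_sh_model {
	void (*reg_setup)(struct op_counter_config *);
	void (*cpu_setup)(void *dummy);
	int  (*init)(void);
	void (*exit)(void);
	void (*cpu_start)(void *args);
	void (*cpu_stop)(void *args);
	char *cpu_type;
	unsigned char num_counters;
};

static unsigned long shadow_regs[2];	/* values cpu_setup() would program */

static void demo_reg_setup(struct op_counter_config *ctr)
{
	int i;

	for (i = 0; i < 2; i++)		/* precompute once, on one CPU */
		shadow_regs[i] = ctr[i].enabled ? ctr[i].event : 0;
}

static void demo_cpu_setup(void *dummy) { /* write shadow_regs to the SPRs */ }
static void demo_cpu_start(void *args)	{ /* set the counter enable bits */ }
static void demo_cpu_stop(void *args)	{ /* clear the counter enable bits */ }
static int  demo_init(void)		{ return 0; }
static void demo_exit(void)		{ }

static struct op_sh_model demo_model = {
	.reg_setup    = demo_reg_setup,
	.cpu_setup    = demo_cpu_setup,
	.cpu_start    = demo_cpu_start,
	.cpu_stop     = demo_cpu_stop,
	.init         = demo_init,
	.exit         = demo_exit,
	.cpu_type     = "sh/demo",
	.num_counters = 2,
};

/* What oprofile_arch_init() would hand back to common.c. */
struct op_sh_model *demo_get_model(void) { return &demo_model; }
```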
diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig
index 3e9d31401fb2..491e9d6de191 100644
--- a/trunk/arch/sparc/Kconfig
+++ b/trunk/arch/sparc/Kconfig
@@ -26,12 +26,10 @@ config SPARC
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
config SPARC32
def_bool !64BIT
@@ -55,7 +53,6 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
diff --git a/trunk/arch/sparc/include/asm/compat.h b/trunk/arch/sparc/include/asm/compat.h
index 6f57325bb883..5016f76ea98a 100644
--- a/trunk/arch/sparc/include/asm/compat.h
+++ b/trunk/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/trunk/arch/sparc/include/asm/jump_label.h b/trunk/arch/sparc/include/asm/jump_label.h
deleted file mode 100644
index 62e66d7b2fb6..000000000000
--- a/trunk/arch/sparc/include/asm/jump_label.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _ASM_SPARC_JUMP_LABEL_H
-#define _ASM_SPARC_JUMP_LABEL_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <asm/system.h>
-
-#define JUMP_LABEL_NOP_SIZE 4
-
-#define JUMP_LABEL(key, label) \
- do { \
- asm goto("1:\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- ".pushsection __jump_table, \"a\"\n\t"\
- ".word 1b, %l[" #label "], %c0\n\t" \
- ".popsection \n\t" \
- : : "i" (key) : : label);\
- } while (0)
-
-#endif /* __KERNEL__ */
-
-typedef u32 jump_label_t;
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
-#endif
diff --git a/trunk/arch/sparc/include/asm/perf_event.h b/trunk/arch/sparc/include/asm/perf_event.h
index 6e8bfa1786da..727af70646cb 100644
--- a/trunk/arch/sparc/include/asm/perf_event.h
+++ b/trunk/arch/sparc/include/asm/perf_event.h
@@ -1,6 +1,10 @@
#ifndef __ASM_SPARC_PERF_EVENT_H
#define __ASM_SPARC_PERF_EVENT_H
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
#ifdef CONFIG_PERF_EVENTS
#include <asm/ptrace.h>
diff --git a/trunk/arch/sparc/kernel/Makefile b/trunk/arch/sparc/kernel/Makefile
index 599398fbbc7c..0c2dc1f24a9a 100644
--- a/trunk/arch/sparc/kernel/Makefile
+++ b/trunk/arch/sparc/kernel/Makefile
@@ -119,5 +119,3 @@ obj-$(CONFIG_COMPAT) += $(audit--y)
pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
-
-obj-$(CONFIG_SPARC64) += jump_label.o
diff --git a/trunk/arch/sparc/kernel/jump_label.c b/trunk/arch/sparc/kernel/jump_label.c
deleted file mode 100644
index ea2dafc93d78..000000000000
--- a/trunk/arch/sparc/kernel/jump_label.c
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/cpu.h>
-
-#include <linux/jump_label.h>
-#include <linux/memory.h>
-
-#ifdef HAVE_JUMP_LABEL
-
-void arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
-{
- u32 val;
- u32 *insn = (u32 *) (unsigned long) entry->code;
-
- if (type == JUMP_LABEL_ENABLE) {
- s32 off = (s32)entry->target - (s32)entry->code;
-
-#ifdef CONFIG_SPARC64
- /* ba,pt %xcc, . + (off << 2) */
- val = 0x10680000 | ((u32) off >> 2);
-#else
- /* ba . + (off << 2) */
- val = 0x10800000 | ((u32) off >> 2);
-#endif
- } else {
- val = 0x01000000;
- }
-
- get_online_cpus();
- mutex_lock(&text_mutex);
- *insn = val;
- flushi(insn);
- mutex_unlock(&text_mutex);
- put_online_cpus();
-}
-
-void arch_jump_label_text_poke_early(jump_label_t addr)
-{
- u32 *insn_p = (u32 *) (unsigned long) addr;
-
- *insn_p = 0x01000000;
- flushi(insn_p);
-}
-
-#endif
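
The deleted transform built the branch by splicing a word-scaled displacement into a fixed opcode template: 0x10680000 is "ba,pt %xcc" and 0x01000000 is a nop. A host-side sketch of that encoding arithmetic, assuming a forward, in-range target as the original code did:

```c
#include <stdio.h>
#include <stdint.h>

/* Encode "ba,pt %xcc, target" the way the deleted sparc64 path did:
 * the 19-bit displacement field holds (target - pc) counted in words.
 */
static uint32_t encode_ba_pt_xcc(uint32_t pc, uint32_t target)
{
	int32_t off = (int32_t)target - (int32_t)pc;

	return 0x10680000u | ((uint32_t)off >> 2);
}

int main(void)
{
	uint32_t insn = encode_ba_pt_xcc(0x1000, 0x1040); /* +64 bytes */

	printf("insn = 0x%08x (disp19 = %u words)\n", insn, insn & 0x7ffff);
	printf("nop  = 0x%08x\n", 0x01000000u);
	return 0;
}
```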
diff --git a/trunk/arch/sparc/kernel/module.c b/trunk/arch/sparc/kernel/module.c
index ee3c7dde8d9f..f848aadf54dc 100644
--- a/trunk/arch/sparc/kernel/module.c
+++ b/trunk/arch/sparc/kernel/module.c
@@ -18,9 +18,6 @@
#include <asm/spitfire.h>
#ifdef CONFIG_SPARC64
-
-#include <linux/jump_label.h>
-
static void *module_map(unsigned long size)
{
struct vm_struct *area;
@@ -230,9 +227,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- /* make jump label nops */
- jump_label_apply_nops(me);
-
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long va;
diff --git a/trunk/arch/sparc/kernel/pcr.c b/trunk/arch/sparc/kernel/pcr.c
index b87873c0e8ea..c4a6a50b4849 100644
--- a/trunk/arch/sparc/kernel/pcr.c
+++ b/trunk/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/irq_work.h>
+#include <linux/perf_event.h>
#include <asm/pil.h>
#include <asm/pcr.h>
@@ -43,14 +43,14 @@ void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
-#ifdef CONFIG_IRQ_WORK
- irq_work_run();
+#ifdef CONFIG_PERF_EVENTS
+ perf_event_do_pending();
#endif
irq_exit();
set_irq_regs(old_regs);
}
-void arch_irq_work_raise(void)
+void set_perf_event_pending(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
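
The restored pcr.c scheme defers work raised in an unsafe context (the PMU NMI) to a later software interrupt: set_perf_event_pending() sets the softint bit, and the IRQ handler drains it through perf_event_do_pending(). A minimal userspace analogy of that raise-then-drain pattern:

```c
#include <stdio.h>
#include <stdbool.h>

static volatile bool work_pending;	/* stands in for the softint bit */

/* Called from a context where the work can't run directly (e.g. NMI). */
static void set_work_pending(void)
{
	work_pending = true;		/* kernel: set_softint(1 << PIL) */
}

/* Called later from interrupt context, where the work is safe to run. */
static void deferred_work_irq(void)
{
	if (work_pending) {
		work_pending = false;
		printf("running deferred perf work\n");
	}
}

int main(void)
{
	set_work_pending();		/* the "NMI" requests work */
	deferred_work_irq();		/* the "IRQ" drains it */
	return 0;
}
```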
diff --git a/trunk/arch/sparc/kernel/perf_event.c b/trunk/arch/sparc/kernel/perf_event.c
index 0d6deb55a2ae..357ced3c33ff 100644
--- a/trunk/arch/sparc/kernel/perf_event.c
+++ b/trunk/arch/sparc/kernel/perf_event.c
@@ -658,16 +658,13 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
enc = perf_event_get_enc(cpuc->events[i]);
pcr &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
- pcr |= nop_for_index(idx);
- else
- pcr |= event_encoding(enc, idx);
+ pcr |= event_encoding(enc, idx);
}
out:
return pcr;
}
-static void sparc_pmu_enable(struct pmu *pmu)
+void hw_perf_enable(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 pcr;
@@ -694,7 +691,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
pcr_ops->write(cpuc->pcr);
}
-static void sparc_pmu_disable(struct pmu *pmu)
+void hw_perf_disable(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
@@ -713,65 +710,19 @@ static void sparc_pmu_disable(struct pmu *pmu)
pcr_ops->write(cpuc->pcr);
}
-static int active_event_index(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- int i;
-
- for (i = 0; i < cpuc->n_events; i++) {
- if (cpuc->event[i] == event)
- break;
- }
- BUG_ON(i == cpuc->n_events);
- return cpuc->current_idx[i];
-}
-
-static void sparc_pmu_start(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx = active_event_index(cpuc, event);
-
- if (flags & PERF_EF_RELOAD) {
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
- sparc_perf_event_set_period(event, &event->hw, idx);
- }
-
- event->hw.state = 0;
-
- sparc_pmu_enable_event(cpuc, &event->hw, idx);
-}
-
-static void sparc_pmu_stop(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- int idx = active_event_index(cpuc, event);
-
- if (!(event->hw.state & PERF_HES_STOPPED)) {
- sparc_pmu_disable_event(cpuc, &event->hw, idx);
- event->hw.state |= PERF_HES_STOPPED;
- }
-
- if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
- sparc_perf_event_update(event, &event->hw, idx);
- event->hw.state |= PERF_HES_UPTODATE;
- }
-}
-
-static void sparc_pmu_del(struct perf_event *event, int _flags)
+static void sparc_pmu_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
unsigned long flags;
int i;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
+ perf_disable();
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
- /* Absorb the final count and turn off the
- * event.
- */
- sparc_pmu_stop(event, PERF_EF_UPDATE);
+ int idx = cpuc->current_idx[i];
/* Shift remaining entries down into
* the existing slot.
@@ -783,6 +734,13 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
cpuc->current_idx[i];
}
+ /* Absorb the final count and turn off the
+ * event.
+ */
+ sparc_pmu_disable_event(cpuc, hwc, idx);
+ barrier();
+ sparc_perf_event_update(event, hwc, idx);
+
perf_event_update_userpage(event);
cpuc->n_events--;
@@ -790,10 +748,23 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
}
+static int active_event_index(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int i;
+
+ for (i = 0; i < cpuc->n_events; i++) {
+ if (cpuc->event[i] == event)
+ break;
+ }
+ BUG_ON(i == cpuc->n_events);
+ return cpuc->current_idx[i];
+}
+
static void sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -803,6 +774,15 @@ static void sparc_pmu_read(struct perf_event *event)
sparc_perf_event_update(event, hwc, idx);
}
+static void sparc_pmu_unthrottle(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx = active_event_index(cpuc, event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ sparc_pmu_enable_event(cpuc, hwc, idx);
+}
+
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
@@ -897,7 +877,7 @@ static int sparc_check_constraints(struct perf_event **evts,
if (!n_ev)
return 0;
- if (n_ev > MAX_HWEVENTS)
+ if (n_ev > perf_max_events)
return -1;
msk0 = perf_event_get_msk(events[0]);
@@ -1004,27 +984,23 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static int sparc_pmu_add(struct perf_event *event, int ef_flags)
+static int sparc_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
- perf_pmu_disable(event->pmu);
+ perf_disable();
n0 = cpuc->n_events;
- if (n0 >= MAX_HWEVENTS)
+ if (n0 >= perf_max_events)
goto out;
cpuc->event[n0] = event;
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
- event->hw.state = PERF_HES_UPTODATE;
- if (!(ef_flags & PERF_EF_START))
- event->hw.state |= PERF_HES_STOPPED;
-
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
@@ -1044,12 +1020,12 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
ret = 0;
out:
- perf_pmu_enable(event->pmu);
+ perf_enable();
local_irq_restore(flags);
return ret;
}
-static int sparc_pmu_event_init(struct perf_event *event)
+static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct perf_event *evts[MAX_HWEVENTS];
@@ -1062,37 +1038,16 @@ static int sparc_pmu_event_init(struct perf_event *event)
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
- switch (attr->type) {
- case PERF_TYPE_HARDWARE:
+ if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
pmap = sparc_pmu->event_map(attr->config);
- break;
-
- case PERF_TYPE_HW_CACHE:
+ } else if (attr->type == PERF_TYPE_HW_CACHE) {
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
- break;
-
- case PERF_TYPE_RAW:
- pmap = NULL;
- break;
-
- default:
- return -ENOENT;
-
- }
-
- if (pmap) {
- hwc->event_base = perf_event_encode(pmap);
- } else {
- /*
- * User gives us "(encoding << 16) | pic_mask" for
- * PERF_TYPE_RAW events.
- */
- hwc->event_base = attr->config;
- }
+ } else
+ return -EOPNOTSUPP;
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
@@ -1103,10 +1058,12 @@ static int sparc_pmu_event_init(struct perf_event *event)
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
+ hwc->event_base = perf_event_encode(pmap);
+
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
- MAX_HWEVENTS - 1,
+ perf_max_events - 1,
evts, events, current_idx_dmy);
if (n < 0)
return -EINVAL;
@@ -1142,11 +1099,10 @@ static int sparc_pmu_event_init(struct perf_event *event)
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-static void sparc_pmu_start_txn(struct pmu *pmu)
+static void sparc_pmu_start_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
- perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
}
@@ -1155,12 +1111,11 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-static void sparc_pmu_cancel_txn(struct pmu *pmu)
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
- perf_pmu_enable(pmu);
}
/*
@@ -1168,7 +1123,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-static int sparc_pmu_commit_txn(struct pmu *pmu)
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
@@ -1184,24 +1139,28 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
return -EAGAIN;
cpuc->group_flag &= ~PERF_EVENT_TXN;
- perf_pmu_enable(pmu);
return 0;
}
-static struct pmu pmu = {
- .pmu_enable = sparc_pmu_enable,
- .pmu_disable = sparc_pmu_disable,
- .event_init = sparc_pmu_event_init,
- .add = sparc_pmu_add,
- .del = sparc_pmu_del,
- .start = sparc_pmu_start,
- .stop = sparc_pmu_stop,
+static const struct pmu pmu = {
+ .enable = sparc_pmu_enable,
+ .disable = sparc_pmu_disable,
.read = sparc_pmu_read,
+ .unthrottle = sparc_pmu_unthrottle,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = __hw_perf_event_init(event);
+
+ if (err)
+ return ERR_PTR(err);
+ return &pmu;
+}
+
void perf_event_print_debug(void)
{
unsigned long flags;
@@ -1277,7 +1236,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
continue;
if (perf_event_overflow(event, 1, &data, regs))
- sparc_pmu_stop(event, 0);
+ sparc_pmu_disable_event(cpuc, hwc, idx);
}
return NOTIFY_STOP;
@@ -1318,21 +1277,28 @@ void __init init_hw_perf_events(void)
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
- perf_pmu_register(&pmu);
+ /* All sparc64 PMUs currently have 2 events. */
+ perf_max_events = 2;
+
register_die_notifier(&perf_event_nmi_notifier);
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+static void perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
- stack_trace_flush();
-
- perf_callchain_store(entry, regs->tpc);
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS;
@@ -1356,13 +1322,13 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
- perf_callchain_store(entry, pc);
+ callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
int index = current->curr_ret_stack;
if (current->ret_stack && index >= graph) {
pc = current->ret_stack[index - graph].ret;
- perf_callchain_store(entry, pc);
+ callchain_store(entry, pc);
graph++;
}
}
@@ -1370,12 +1336,13 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void perf_callchain_user_64(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
@@ -1388,16 +1355,17 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
- perf_callchain_store(entry, pc);
+ callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+static void perf_callchain_user_32(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
{
unsigned long ufp;
- perf_callchain_store(entry, regs->tpc);
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
@@ -1410,16 +1378,34 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
- perf_callchain_store(entry, pc);
+ callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
-void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+/* Like powerpc we can't get PMU interrupts within the PMU handler,
+ * so no need for separate NMI and IRQ chains as on x86.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
- flushw_user();
- if (test_thread_flag(TIF_32BIT))
- perf_callchain_user_32(entry, regs);
- else
- perf_callchain_user_64(entry, regs);
+ struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+ entry->nr = 0;
+ if (!user_mode(regs)) {
+ stack_trace_flush();
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs) {
+ flushw_user();
+ if (test_thread_flag(TIF_32BIT))
+ perf_callchain_user_32(regs, entry);
+ else
+ perf_callchain_user_64(regs, entry);
+ }
+ return entry;
}
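
The restored perf_callchain() walks kernel frames first and then, only when the interrupted task actually has a user context (current->mm), switches regs to task_pt_regs() and continues with the user walkers. A stub-based sketch of just that dispatch decision; user_mode(), the walkers, and task_pt_regs_stub() are stand-ins:

```c
#include <stdio.h>
#include <stdbool.h>

struct pt_regs { bool from_user; };
struct entry   { int nr; };

static bool user_mode(struct pt_regs *r) { return r->from_user; }
static struct pt_regs *task_pt_regs_stub(void)
{
	static struct pt_regs uregs = { true };
	return &uregs;
}
static bool current_has_mm = true;	/* a kernel thread would be false */

static void walk_kernel(struct pt_regs *r, struct entry *e) { e->nr++; }
static void walk_user(struct pt_regs *r, struct entry *e)   { e->nr++; }

static void do_callchain(struct pt_regs *regs, struct entry *entry)
{
	entry->nr = 0;
	if (!user_mode(regs)) {
		walk_kernel(regs, entry);
		/* continue with user frames only if a user context exists */
		regs = current_has_mm ? task_pt_regs_stub() : NULL;
	}
	if (regs)
		walk_user(regs, entry);
}

int main(void)
{
	struct pt_regs kregs = { false };
	struct entry e;

	do_callchain(&kregs, &e);
	printf("captured %d chain segments\n", e.nr);
	return 0;
}
```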
diff --git a/trunk/arch/sparc/kernel/signal32.c b/trunk/arch/sparc/kernel/signal32.c
index 75fad425e249..ea22cd373c64 100644
--- a/trunk/arch/sparc/kernel/signal32.c
+++ b/trunk/arch/sparc/kernel/signal32.c
@@ -453,66 +453,8 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
return err;
}
-/* The I-cache flush instruction only works in the primary ASI, which
- * right now is the nucleus, aka. kernel space.
- *
- * Therefore we have to kick the instructions out using the kernel
- * side linear mapping of the physical address backing the user
- * instructions.
- */
-static void flush_signal_insns(unsigned long address)
-{
- unsigned long pstate, paddr;
- pte_t *ptep, pte;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
-
- /* Commit all stores of the instructions we are about to flush. */
- wmb();
-
- /* Disable cross-call reception. In this way even a very wide
- * munmap() on another cpu can't tear down the page table
- * hierarchy from underneath us, since that can't complete
- * until the IPI tlb flush returns.
- */
-
- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
- __asm__ __volatile__("wrpr %0, %1, %%pstate"
- : : "r" (pstate), "i" (PSTATE_IE));
-
- pgdp = pgd_offset(current->mm, address);
- if (pgd_none(*pgdp))
- goto out_irqs_on;
- pudp = pud_offset(pgdp, address);
- if (pud_none(*pudp))
- goto out_irqs_on;
- pmdp = pmd_offset(pudp, address);
- if (pmd_none(*pmdp))
- goto out_irqs_on;
-
- ptep = pte_offset_map(pmdp, address);
- pte = *ptep;
- if (!pte_present(pte))
- goto out_unmap;
-
- paddr = (unsigned long) page_address(pte_page(pte));
-
- __asm__ __volatile__("flush %0 + %1"
- : /* no outputs */
- : "r" (paddr),
- "r" (address & (PAGE_SIZE - 1))
- : "memory");
-
-out_unmap:
- pte_unmap(ptep);
-out_irqs_on:
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-
-}
-
-static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
@@ -605,7 +547,13 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
+ /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
+ pgd_t *pgdp = pgd_offset(current->mm, address);
+ pud_t *pudp = pud_offset(pgdp, address);
+ pmd_t *pmdp = pmd_offset(pudp, address);
+ pte_t *ptep;
+ pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -614,22 +562,34 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (err)
goto sigsegv;
- flush_signal_insns(address);
+ preempt_disable();
+ ptep = pte_offset_map(pmdp, address);
+ pte = *ptep;
+ if (pte_present(pte)) {
+ unsigned long page = (unsigned long)
+ page_address(pte_page(pte));
+
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+ }
+ pte_unmap(ptep);
+ preempt_enable();
}
- return 0;
+ return;
sigill:
do_exit(SIGILL);
- return -EINVAL;
-
sigsegv:
force_sigsegv(signo, current);
- return -EFAULT;
}
-static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
- unsigned long signr, sigset_t *oldset,
- siginfo_t *info)
+static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+ unsigned long signr, sigset_t *oldset,
+ siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
@@ -727,7 +687,12 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
+ /* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
+ pgd_t *pgdp = pgd_offset(current->mm, address);
+ pud_t *pudp = pud_offset(pgdp, address);
+ pmd_t *pmdp = pmd_offset(pudp, address);
+ pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -739,32 +704,38 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (err)
goto sigsegv;
- flush_signal_insns(address);
+ preempt_disable();
+ ptep = pte_offset_map(pmdp, address);
+ if (pte_present(*ptep)) {
+ unsigned long page = (unsigned long)
+ page_address(pte_page(*ptep));
+
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+ }
+ pte_unmap(ptep);
+ preempt_enable();
}
- return 0;
+ return;
sigill:
do_exit(SIGILL);
- return -EINVAL;
-
sigsegv:
force_sigsegv(signr, current);
- return -EFAULT;
}
-static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
- int err;
-
if (ka->sa.sa_flags & SA_SIGINFO)
- err = setup_rt_frame32(ka, regs, signr, oldset, info);
+ setup_rt_frame32(ka, regs, signr, oldset, info);
else
- err = setup_frame32(ka, regs, signr, oldset);
-
- if (err)
- return err;
+ setup_frame32(ka, regs, signr, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -772,10 +743,6 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
-
- tracehook_signal_handler(signr, info, ka, regs, 0);
-
- return 0;
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -822,14 +789,16 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
- if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- }
+ handle_signal32(signr, &ka, &info, oldset, regs);
+
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+ tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
@@ -840,14 +809,12 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
- pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
- pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
diff --git a/trunk/arch/sparc/kernel/signal_32.c b/trunk/arch/sparc/kernel/signal_32.c
index 5e5c5fd03783..9882df92ba0a 100644
--- a/trunk/arch/sparc/kernel/signal_32.c
+++ b/trunk/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
return err;
}
-static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset)
+static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset)
{
struct signal_frame __user *sf;
int sigframe_size, err;
@@ -384,19 +384,16 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return 0;
+ return;
sigill_and_return:
do_exit(SIGILL);
- return -EINVAL;
-
sigsegv:
force_sigsegv(signo, current);
- return -EFAULT;
}
-static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signo, sigset_t *oldset, siginfo_t *info)
+static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size;
@@ -469,30 +466,22 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
- return 0;
+ return;
sigill:
do_exit(SIGILL);
- return -EINVAL;
-
sigsegv:
force_sigsegv(signo, current);
- return -EFAULT;
}
-static inline int
+static inline void
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
- int err;
-
if (ka->sa.sa_flags & SA_SIGINFO)
- err = setup_rt_frame(ka, regs, signr, oldset, info);
+ setup_rt_frame(ka, regs, signr, oldset, info);
else
- err = setup_frame(ka, regs, signr, oldset);
-
- if (err)
- return err;
+ setup_frame(ka, regs, signr, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -500,10 +489,6 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
-
- tracehook_signal_handler(signr, info, ka, regs, 0);
-
- return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -561,15 +546,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
+ handle_signal(signr, &ka, &info, oldset, regs);
+
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
@@ -580,14 +567,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->npc -= 4;
- pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
regs->npc -= 4;
- pt_regs_clear_syscall(regs);
}
/* if there's no signal to deliver, we just put the saved sigmask
diff --git a/trunk/arch/sparc/kernel/signal_64.c b/trunk/arch/sparc/kernel/signal_64.c
index 006fe4515886..9fa48c30037e 100644
--- a/trunk/arch/sparc/kernel/signal_64.c
+++ b/trunk/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
return (void __user *) sp;
}
-static inline int
+static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@@ -483,37 +483,26 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
- return 0;
+ return;
sigill:
do_exit(SIGILL);
- return -EINVAL;
-
sigsegv:
force_sigsegv(signo, current);
- return -EFAULT;
}
-static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
- siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs)
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
+ siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
{
- int err;
-
- err = setup_rt_frame(ka, regs, signr, oldset,
- (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
- if (err)
- return err;
+ setup_rt_frame(ka, regs, signr, oldset,
+ (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
-
- tracehook_signal_handler(signr, info, ka, regs, 0);
-
- return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -582,14 +571,16 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
- if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
- /* A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TS_RESTORE_SIGMASK flag.
- */
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- }
+ handle_signal(signr, &ka, &info, oldset, regs);
+
+ /* A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TS_RESTORE_SIGMASK flag.
+ */
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+
+ tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
@@ -600,14 +591,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
- pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
- pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask
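
All three signal paths restart an interrupted syscall the same way: rewind tpc and tnpc by one 4-byte instruction so the trap re-executes, and for ERESTART_RESTARTBLOCK also load %g1 with __NR_restart_syscall. A toy model of that rewind, with a placeholder syscall number:

```c
#include <stdio.h>
#include <stdint.h>

struct regs { uint64_t tpc, tnpc, g1, i0; };

#define NR_RESTART_SYSCALL 999	/* placeholder; the real number is per-ABI */

/* Rewind the trap instruction so it runs again on return to userspace. */
static void restart_block(struct regs *r)
{
	r->g1 = NR_RESTART_SYSCALL;	/* syscall number lives in %g1 */
	r->tpc  -= 4;
	r->tnpc -= 4;
}

int main(void)
{
	struct regs r = { .tpc = 0x4000, .tnpc = 0x4004 };

	restart_block(&r);
	printf("tpc=0x%llx tnpc=0x%llx g1=%llu\n",
	       (unsigned long long)r.tpc, (unsigned long long)r.tnpc,
	       (unsigned long long)r.g1);
	return 0;
}
```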
diff --git a/trunk/arch/tile/include/arch/chip_tile64.h b/trunk/arch/tile/include/arch/chip_tile64.h
index 261aaba092d4..1246573be59e 100644
--- a/trunk/arch/tile/include/arch/chip_tile64.h
+++ b/trunk/arch/tile/include/arch/chip_tile64.h
@@ -150,9 +150,6 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 0
-/** Is the DSTREAM_PF SPR supported? */
-#define CHIP_HAS_DSTREAM_PF() 0
-
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/trunk/arch/tile/include/arch/chip_tilepro.h b/trunk/arch/tile/include/arch/chip_tilepro.h
index 70017699a74c..e864c47fc89c 100644
--- a/trunk/arch/tile/include/arch/chip_tilepro.h
+++ b/trunk/arch/tile/include/arch/chip_tilepro.h
@@ -150,9 +150,6 @@
/** Is the PROC_STATUS SPR supported? */
#define CHIP_HAS_PROC_STATUS_SPR() 1
-/** Is the DSTREAM_PF SPR supported? */
-#define CHIP_HAS_DSTREAM_PF() 0
-
/** Log of the number of mshims we have. */
#define CHIP_LOG_NUM_MSHIMS() 2
diff --git a/trunk/arch/tile/include/asm/compat.h b/trunk/arch/tile/include/asm/compat.h
index 8b60ec8b2d19..5a34da6cdd79 100644
--- a/trunk/arch/tile/include/asm/compat.h
+++ b/trunk/arch/tile/include/asm/compat.h
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
return (long)(int)(long __force)uptr;
}
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
@@ -214,9 +214,8 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
struct compat_sigaction;
struct compat_siginfo;
struct compat_sigaltstack;
-long compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
+long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp);
long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
struct compat_sigaction __user *oact,
size_t sigsetsize);
diff --git a/trunk/arch/tile/include/asm/io.h b/trunk/arch/tile/include/asm/io.h
index ee43328713ab..8c95bef3fa45 100644
--- a/trunk/arch/tile/include/asm/io.h
+++ b/trunk/arch/tile/include/asm/io.h
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
#define iowrite32 writel
#define iowrite64 writeq
-static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
- size_t len)
+static inline void *memcpy_fromio(void *dst, void *src, int len)
{
int x;
BUG_ON((unsigned long)src & 0x3);
for (x = 0; x < len; x += 4)
*(u32 *)(dst + x) = readl(src + x);
+ return dst;
}
-static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
- size_t len)
+static inline void *memcpy_toio(void *dst, void *src, int len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
for (x = 0; x < len; x += 4)
writel(*(u32 *)(src + x), dst + x);
+ return dst;
}
/*
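
Both versions of the tile memcpy_fromio()/memcpy_toio() copy strictly in aligned 32-bit words because the I/O window only tolerates word accesses. A host-side sketch of the same loop, with a volatile array standing in for readl() and len assumed to be a multiple of 4:

```c
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Word-granular copy: one 32-bit load/store per iteration, never bytes. */
static void copy_fromio(void *dst, const volatile uint32_t *src, size_t len)
{
	size_t x;

	assert(((uintptr_t)src & 0x3) == 0);	/* kernel: BUG_ON(... & 0x3) */
	for (x = 0; x < len; x += 4)
		*(uint32_t *)((char *)dst + x) = src[x / 4];
}

int main(void)
{
	volatile uint32_t fake_mmio[2] = { 0xdeadbeef, 0x12345678 };
	uint32_t buf[2];

	copy_fromio(buf, fake_mmio, sizeof(buf));
	printf("0x%08x 0x%08x\n", buf[0], buf[1]);
	return 0;
}
```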
diff --git a/trunk/arch/tile/include/asm/processor.h b/trunk/arch/tile/include/asm/processor.h
index ccd5f8425688..d942d09b252e 100644
--- a/trunk/arch/tile/include/asm/processor.h
+++ b/trunk/arch/tile/include/asm/processor.h
@@ -103,18 +103,6 @@ struct thread_struct {
/* Any other miscellaneous processor state bits */
unsigned long proc_status;
#endif
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- /* Interrupt base for PL0 interrupts */
- unsigned long interrupt_vector_base;
-#endif
-#if CHIP_HAS_TILE_RTF_HWM()
- /* Tile cache retry fifo high-water mark */
- unsigned long tile_rtf_hwm;
-#endif
-#if CHIP_HAS_DSTREAM_PF()
- /* Data stream prefetch control */
- unsigned long dstream_pf;
-#endif
#ifdef CONFIG_HARDWALL
/* Is this task tied to an activated hardwall? */
struct hardwall_info *hardwall;
diff --git a/trunk/arch/tile/include/asm/ptrace.h b/trunk/arch/tile/include/asm/ptrace.h
index 4a02bb073979..acdae814e016 100644
--- a/trunk/arch/tile/include/asm/ptrace.h
+++ b/trunk/arch/tile/include/asm/ptrace.h
@@ -51,7 +51,10 @@ typedef uint_reg_t pt_reg_t;
/*
* This struct defines the way the registers are stored on the stack during a
- * system call or exception. "struct sigcontext" has the same shape.
+ * system call/exception. It should be a multiple of 8 bytes to preserve
+ * normal stack alignment rules.
+ *
+ * Must track <sys/ucontext.h> and <sys/procfs.h>
*/
struct pt_regs {
/* Saved main processor registers; 56..63 are special. */
@@ -77,6 +80,11 @@ struct pt_regs {
#endif /* __ASSEMBLY__ */
+/* Flag bits in pt_regs.flags */
+#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
+#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
+#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
+
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
@@ -93,11 +101,6 @@ struct pt_regs {
#ifdef __KERNEL__
-/* Flag bits in pt_regs.flags */
-#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
-#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
-#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
-
#ifndef __ASSEMBLY__
#define instruction_pointer(regs) ((regs)->pc)
diff --git a/trunk/arch/tile/include/asm/sigcontext.h b/trunk/arch/tile/include/asm/sigcontext.h
index 5e2d03336f53..7cd7672e3ad4 100644
--- a/trunk/arch/tile/include/asm/sigcontext.h
+++ b/trunk/arch/tile/include/asm/sigcontext.h
@@ -15,21 +15,13 @@
#ifndef _ASM_TILE_SIGCONTEXT_H
#define _ASM_TILE_SIGCONTEXT_H
-#include <arch/abi.h>
+/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
+#include <asm/ptrace.h>
+
+/* Must track <sys/ucontext.h> */
-/*
- * struct sigcontext has the same shape as struct pt_regs,
- * but is simplified since we know the fault is from userspace.
- */
struct sigcontext {
- uint_reg_t gregs[53]; /* General-purpose registers. */
- uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
- uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
- uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
- uint_reg_t pc; /* Program counter. */
- uint_reg_t ics; /* In Interrupt Critical Section? */
- uint_reg_t faultnum; /* Fault number. */
- uint_reg_t pad[5];
+ struct pt_regs regs;
};
#endif /* _ASM_TILE_SIGCONTEXT_H */
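
Wrapping struct pt_regs directly (the new side) is what let signal.c drop its BUILD_BUG_ON shape checks; the old field-by-field mirror needed them. The same invariants can be stated in C11 with _Static_assert. A sketch with simplified stand-in layouts (the field list here is illustrative, not the real tile register set):

```c
#include <stdint.h>

/* Simplified stand-ins for the tile register structures. */
struct pt_regs_demo {
	uint64_t regs[56];
	uint64_t pc, ex1, faultnum, orig_r0, flags, pad[3];
};

struct sigcontext_demo {
	struct pt_regs_demo regs;	/* same shape by construction */
};

/* The invariants BUILD_BUG_ON() enforced at kernel build time. */
_Static_assert(sizeof(struct sigcontext_demo) == sizeof(struct pt_regs_demo),
	       "sigcontext must have the same shape as pt_regs");
_Static_assert(sizeof(struct sigcontext_demo) % 8 == 0,
	       "sigcontext must preserve 8-byte stack alignment");

int main(void) { return 0; }
```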
diff --git a/trunk/arch/tile/include/asm/signal.h b/trunk/arch/tile/include/asm/signal.h
index c1ee1d61d44c..eb0253f32202 100644
--- a/trunk/arch/tile/include/asm/signal.h
+++ b/trunk/arch/tile/include/asm/signal.h
@@ -24,7 +24,6 @@
#include <asm-generic/signal.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
-struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
diff --git a/trunk/arch/tile/include/asm/syscalls.h b/trunk/arch/tile/include/asm/syscalls.h
index ce99ffefeacf..af165a74537f 100644
--- a/trunk/arch/tile/include/asm/syscalls.h
+++ b/trunk/arch/tile/include/asm/syscalls.h
@@ -62,12 +62,10 @@ long sys_fork(void);
long _sys_fork(struct pt_regs *regs);
long sys_vfork(void);
long _sys_vfork(struct pt_regs *regs);
-long sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp);
-long _sys_execve(const char __user *filename,
- const char __user *const __user *argv,
- const char __user *const __user *envp, struct pt_regs *regs);
+long sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp);
+long _sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs);
/* kernel/signal.c */
long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -88,13 +86,10 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
#endif
#ifdef CONFIG_COMPAT
-long compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp,
- struct pt_regs *regs);
+long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp);
+long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp, struct pt_regs *regs);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
diff --git a/trunk/arch/tile/kernel/intvec_32.S b/trunk/arch/tile/kernel/intvec_32.S
index 8f58bdff20d7..84f296ca9e63 100644
--- a/trunk/arch/tile/kernel/intvec_32.S
+++ b/trunk/arch/tile/kernel/intvec_32.S
@@ -1506,6 +1506,13 @@ handle_ill:
}
STD_ENDPROC(handle_ill)
+ .pushsection .rodata, "a"
+ .align 8
+bpt_code:
+ bpt
+ ENDPROC(bpt_code)
+ .popsection
+
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/trunk/arch/tile/kernel/process.c b/trunk/arch/tile/kernel/process.c
index 84c29111756c..985cc28c74c5 100644
--- a/trunk/arch/tile/kernel/process.c
+++ b/trunk/arch/tile/kernel/process.c
@@ -408,15 +408,6 @@ static void save_arch_state(struct thread_struct *t)
#if CHIP_HAS_PROC_STATUS_SPR()
t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
-#endif
-#if CHIP_HAS_TILE_RTF_HWM()
- t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
-#endif
-#if CHIP_HAS_DSTREAM_PF()
- t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
-#endif
}
static void restore_arch_state(const struct thread_struct *t)
@@ -437,14 +428,14 @@ static void restore_arch_state(const struct thread_struct *t)
#if CHIP_HAS_PROC_STATUS_SPR()
__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
-#endif
#if CHIP_HAS_TILE_RTF_HWM()
- __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
-#endif
-#if CHIP_HAS_DSTREAM_PF()
- __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
+ /*
+ * Clear this whenever we switch back to a process in case
+ * the previous process was monkeying with it. Even if enabled
+ * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
+ * performance hint, so isn't worth a full save/restore.
+ */
+ __insn_mtspr(SPR_TILE_RTF_HWM, 0);
#endif
}
@@ -570,9 +561,8 @@ long _sys_execve(const char __user *path,
}
#ifdef CONFIG_COMPAT
-long _compat_sys_execve(const char __user *path,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+ compat_uptr_t __user *envp, struct pt_regs *regs)
{
long error;
char *filename;
@@ -667,7 +657,7 @@ void show_regs(struct pt_regs *regs)
regs->regs[51], regs->regs[52], regs->tp);
pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
- for (i = 0; i < 52; i += 4)
+ for (i = 0; i < 52; i += 3)
pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/trunk/arch/tile/kernel/signal.c b/trunk/arch/tile/kernel/signal.c
index ce183aa1492c..45b66a3c991f 100644
--- a/trunk/arch/tile/kernel/signal.c
+++ b/trunk/arch/tile/kernel/signal.c
@@ -61,19 +61,13 @@ int restore_sigcontext(struct pt_regs *regs,
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- /*
- * Enforce that sigcontext is like pt_regs, and doesn't mess
- * up our stack alignment rules.
- */
- BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
- BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
-
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ err |= __get_user(((long *)regs)[i],
+ &((long __user *)(&sc->regs))[i]);
regs->faultnum = INT_SWINT_1_SIGRETURN;
- err |= __get_user(*pr0, &sc->gregs[0]);
+ err |= __get_user(*pr0, &sc->regs.regs[0]);
return err;
}
@@ -118,7 +112,8 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
int i, err = 0;
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
- err |= __put_user(regs->regs[i], &sc->gregs[i]);
+ err |= __put_user(((long *)regs)[i],
+ &((long __user *)(&sc->regs))[i]);
return err;
}
@@ -208,17 +203,19 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Set up registers for signal handler.
* Registers that we don't modify keep the value they had from
* user-space at the time we took the signal.
- * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
- * since some things rely on this (e.g. glibc's debug/segfault.c).
*/
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
regs->sp = (unsigned long) frame;
regs->lr = restorer;
regs->regs[0] = (unsigned long) usig;
- regs->regs[1] = (unsigned long) &frame->info;
- regs->regs[2] = (unsigned long) &frame->uc;
- regs->flags |= PT_FLAGS_CALLER_SAVES;
+
+ if (ka->sa.sa_flags & SA_SIGINFO) {
+ /* Need extra arguments, so mark to restore caller-saves. */
+ regs->regs[1] = (unsigned long) &frame->info;
+ regs->regs[2] = (unsigned long) &frame->uc;
+ regs->flags |= PT_FLAGS_CALLER_SAVES;
+ }
/*
* Notify any tracer that was single-stepping it.
diff --git a/trunk/arch/tile/kernel/stack.c b/trunk/arch/tile/kernel/stack.c
index ea2e0ce28380..38a68b0b4581 100644
--- a/trunk/arch/tile/kernel/stack.c
+++ b/trunk/arch/tile/kernel/stack.c
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
pr_err(" \n",
frame->info.si_signo);
}
- return (struct pt_regs *)&frame->uc.uc_mcontext;
+ return &frame->uc.uc_mcontext.regs;
}
return NULL;
}
diff --git a/trunk/arch/um/drivers/hostaudio_kern.c b/trunk/arch/um/drivers/hostaudio_kern.c
index 63c740a85b4c..0c46e398cd8f 100644
--- a/trunk/arch/um/drivers/hostaudio_kern.c
+++ b/trunk/arch/um/drivers/hostaudio_kern.c
@@ -40,11 +40,6 @@ static char *mixer = HOSTAUDIO_DEV_MIXER;
" This is used to specify the host mixer device to the hostaudio driver.\n"\
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
-module_param(dsp, charp, 0644);
-MODULE_PARM_DESC(dsp, DSP_HELP);
-module_param(mixer, charp, 0644);
-MODULE_PARM_DESC(mixer, MIXER_HELP);
-
#ifndef MODULE
static int set_dsp(char *name, int *add)
{
@@ -61,6 +56,15 @@ static int set_mixer(char *name, int *add)
}
__uml_setup("mixer=", set_mixer, "mixer=\n" MIXER_HELP);
+
+#else /*MODULE*/
+
+module_param(dsp, charp, 0644);
+MODULE_PARM_DESC(dsp, DSP_HELP);
+
+module_param(mixer, charp, 0644);
+MODULE_PARM_DESC(mixer, MIXER_HELP);
+
#endif
/* /dev/dsp file operations */
diff --git a/trunk/arch/um/drivers/net_kern.c b/trunk/arch/um/drivers/net_kern.c
index 47d0c37897d5..2ab233ba32c1 100644
--- a/trunk/arch/um/drivers/net_kern.c
+++ b/trunk/arch/um/drivers/net_kern.c
@@ -255,6 +255,18 @@ static void uml_net_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static int uml_net_set_mac(struct net_device *dev, void *addr)
+{
+ struct uml_net_private *lp = netdev_priv(dev);
+ struct sockaddr *hwaddr = addr;
+
+ spin_lock_irq(&lp->lock);
+ eth_mac_addr(dev, hwaddr->sa_data);
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
dev->mtu = new_mtu;
@@ -361,7 +373,7 @@ static const struct net_device_ops uml_netdev_ops = {
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_multicast_list = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = uml_net_set_mac,
.ndo_change_mtu = uml_net_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
@@ -460,8 +472,7 @@ static void eth_configure(int n, void *init, char *mac,
((*transport->user->init)(&lp->user, dev) != 0))
goto out_unregister;
- /* don't use eth_mac_addr, it will not work here */
- memcpy(dev->dev_addr, device->mac, ETH_ALEN);
+ eth_mac_addr(dev, device->mac);
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
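
uml_net_set_mac() exists only to take the driver's own lock around eth_mac_addr(), since the UML transport can touch dev_addr concurrently. A pthread analogy of that rule, with the sockaddr plumbing trimmed to a raw 6-byte array:

```c
#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

struct demo_dev {
	pthread_mutex_t lock;
	unsigned char dev_addr[ETH_ALEN];
};

/* kernel: spin_lock_irq(&lp->lock); eth_mac_addr(dev, ...); unlock */
static int demo_set_mac(struct demo_dev *dev, const unsigned char *mac)
{
	pthread_mutex_lock(&dev->lock);
	memcpy(dev->dev_addr, mac, ETH_ALEN);
	pthread_mutex_unlock(&dev->lock);
	return 0;
}

int main(void)
{
	struct demo_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER };
	const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0xaa, 0xbb, 0xcc };

	demo_set_mac(&dev, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev.dev_addr[0], dev.dev_addr[1], dev.dev_addr[2],
	       dev.dev_addr[3], dev.dev_addr[4], dev.dev_addr[5]);
	return 0;
}
```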
diff --git a/trunk/arch/um/drivers/ubd_kern.c b/trunk/arch/um/drivers/ubd_kern.c
index 9734994cba1e..1bcd208c459f 100644
--- a/trunk/arch/um/drivers/ubd_kern.c
+++ b/trunk/arch/um/drivers/ubd_kern.c
@@ -163,7 +163,6 @@ struct ubd {
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
- sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -188,7 +187,6 @@ struct ubd {
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
- .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -1230,6 +1228,7 @@ static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
+ sector_t sector;
int n;
while(1){
@@ -1240,12 +1239,12 @@ static void do_ubd_request(struct request_queue *q)
return;
dev->request = req;
- dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
+ sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
@@ -1257,9 +1256,10 @@ static void do_ubd_request(struct request_queue *q)
return;
}
prepare_request(req, io_req,
- (unsigned long long)dev->rq_pos << 9,
+ (unsigned long long)sector << 9,
sg->offset, sg->length, sg_page(sg));
+ sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){
@@ -1272,7 +1272,6 @@ static void do_ubd_request(struct request_queue *q)
return;
}
- dev->rq_pos += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;
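
Both ubd variants do the same sector bookkeeping, they just store the cursor in different places (per-device rq_pos vs. a local reloaded from blk_rq_pos()): a segment's byte offset is sector << 9 and the cursor advances by length >> 9. A small simulation of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 2048;		/* blk_rq_pos(req) */
	unsigned int seg_len[3] = { 4096, 8192, 512 };	/* sg->length */
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long long byte_off = sector << 9; /* sectors -> bytes */

		printf("segment %d: offset %llu, %u bytes\n",
		       i, byte_off, seg_len[i]);
		sector += seg_len[i] >> 9;	/* advance in 512-byte sectors */
	}
	printf("final sector: %llu\n", sector);
	return 0;
}
```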
diff --git a/trunk/arch/um/kernel/exec.c b/trunk/arch/um/kernel/exec.c
index 49b5e1eb3262..cd145eda3579 100644
--- a/trunk/arch/um/kernel/exec.c
+++ b/trunk/arch/um/kernel/exec.c
@@ -62,7 +62,7 @@ static long execve1(const char *file,
return error;
}
-long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
+long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
{
long err;
@@ -72,8 +72,8 @@ long um_execve(const char *file, const char __user *const __user *argv, const ch
return err;
}
-long sys_execve(const char __user *file, const char __user *const __user *argv,
- const char __user *const __user *env)
+long sys_execve(const char __user *file, char __user *__user *argv,
+ char __user *__user *env)
{
long error;
char *filename;
diff --git a/trunk/arch/um/kernel/internal.h b/trunk/arch/um/kernel/internal.h
index 5bf97db24a04..1303a105fe91 100644
--- a/trunk/arch/um/kernel/internal.h
+++ b/trunk/arch/um/kernel/internal.h
@@ -1 +1 @@
-extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
+extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
diff --git a/trunk/arch/um/kernel/syscall.c b/trunk/arch/um/kernel/syscall.c
index f958cb876ee3..5ddb246626db 100644
--- a/trunk/arch/um/kernel/syscall.c
+++ b/trunk/arch/um/kernel/syscall.c
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
fs = get_fs();
set_fs(KERNEL_DS);
- ret = um_execve(filename, (const char __user *const __user *)argv,
- (const char __user *const __user *) envp);
+ ret = um_execve(filename, (char __user *__user *)argv,
+ (char __user *__user *) envp);
set_fs(fs);
return ret;
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index b8676498d8df..cea0cd9a316f 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -25,7 +25,6 @@ config X86
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_PERF_EVENTS if (!M386 && !M486)
- select HAVE_IRQ_WORK
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -34,7 +33,6 @@ config X86
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_C_RECORDMCOUNT
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
@@ -61,8 +59,6 @@ config X86
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_TEXT_POKE_SMP
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
@@ -674,7 +670,7 @@ config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
select SWIOTLB
- depends on X86_64 && PCI && AMD_NB
+ depends on X86_64 && PCI && K8_NB
---help---
Support for full DMA access of devices with 32bit memory access only
on systems with more than 3GB. This is usually needed for USB,
@@ -799,17 +795,6 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-config IRQ_TIME_ACCOUNTING
- bool "Fine granularity task level IRQ time accounting"
- default n
- ---help---
- Select this option to enable fine granularity task irq time
- accounting. This is done by reading a timestamp on each
- transition between softirq and hardirq state, so there can be a
- small performance impact.
-
- If in doubt, say N here.
-
source "kernel/Kconfig.preempt"
config X86_UP_APIC
@@ -1341,34 +1326,25 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
Set whether the default state of memory_corruption_check is
on or off.
-config X86_RESERVE_LOW
- int "Amount of low memory, in kilobytes, to reserve for the BIOS"
- default 64
- range 4 640
+config X86_RESERVE_LOW_64K
+ bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
+ default y
---help---
- Specify the amount of low memory to reserve for the BIOS.
+ Reserve the first 64K of physical RAM on BIOSes that are known
+ to potentially corrupt that memory range. A number of BIOSes are
+ known to utilize this area during suspend/resume, so it must not
+ be used by the kernel.
- The first page contains BIOS data structures that the kernel
- must not use, so that page must always be reserved.
+ Set this to N if you are absolutely sure that you trust the BIOS
+ to get all its memory reservations and usages right.
- By default we reserve the first 64K of physical RAM, as a
- number of BIOSes are known to corrupt that memory range
- during events such as suspend/resume or monitor cable
- insertion, so it must not be used by the kernel.
+ If you have doubts about the BIOS (e.g. suspend/resume does not
+ work or there are kernel crashes after certain hardware hotplug
+ events) and it's not AMI or Phoenix, then you might want to enable
+ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
+ corruption patterns.
- You can set this to 4 if you are absolutely sure that you
- trust the BIOS to get all its memory reservations and usages
- right. If you know your BIOS has problems beyond the
- default 64K area, you can set this to 640 to avoid using the
- entire low memory range.
-
- If you have doubts about the BIOS (e.g. suspend/resume does
- not work or there's kernel crashes after certain hardware
- hotplug events) then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
- typical corruption patterns.
-
- Leave this to the default value of 64 if you are unsure.
+ Say Y if unsure.
config MATH_EMULATION
bool
@@ -2100,7 +2076,7 @@ config OLPC_OPENFIRMWARE
endif # X86_32
-config AMD_NB
+config K8_NB
def_bool y
depends on CPU_SUP_AMD && PCI
@@ -2149,10 +2125,6 @@ config HAVE_ATOMIC_IOMAP
def_bool y
depends on X86_32
-config HAVE_TEXT_POKE_SMP
- bool
- select STOP_MACHINE if SMP
-
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile
index b02e509072a7..8aa1b59b9074 100644
--- a/trunk/arch/x86/Makefile
+++ b/trunk/arch/x86/Makefile
@@ -74,7 +74,7 @@ endif
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+ ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
stackp-y := -fstack-protector
KBUILD_CFLAGS += $(stackp-y)
else
@@ -96,12 +96,8 @@ cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_en
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
-# does binutils support specific instructions?
-asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
-
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/trunk/arch/x86/boot/early_serial_console.c b/trunk/arch/x86/boot/early_serial_console.c
index 5df2869c874b..030f4b93e255 100644
--- a/trunk/arch/x86/boot/early_serial_console.c
+++ b/trunk/arch/x86/boot/early_serial_console.c
@@ -58,19 +58,7 @@ static void parse_earlyprintk(void)
if (arg[pos] == ',')
pos++;
- /*
- * make sure we have
- * "serial,0x3f8,115200"
- * "serial,ttyS0,115200"
- * "ttyS0,115200"
- */
- if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
- port = simple_strtoull(arg + pos, &e, 16);
- if (port == 0 || arg + pos == e)
- port = DEFAULT_SERIAL_PORT;
- else
- pos = e - arg;
- } else if (!strncmp(arg + pos, "ttyS", 4)) {
+ if (!strncmp(arg, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
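After the simplification above only "ttyS<n>[,baud]" arguments are recognized; the digit after "ttyS" indexes a two-entry table of legacy UART bases. A user-space sketch of that parsing (DEFAULT_SERIAL_PORT as in the boot code above):

#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8

static int ttys_to_port(const char *arg)
{
	static const int bases[] = { 0x3f8, 0x2f8 };	/* COM1, COM2 */
	int idx = 0;

	if (strncmp(arg, "ttyS", 4) != 0)
		return DEFAULT_SERIAL_PORT;
	if (arg[4] == '1')	/* only ttyS0 and ttyS1 are known */
		idx = 1;
	return bases[idx];
}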
diff --git a/trunk/arch/x86/ia32/ia32_aout.c b/trunk/arch/x86/ia32/ia32_aout.c
index 2d93bdbc9ac0..0350311906ae 100644
--- a/trunk/arch/x86/ia32/ia32_aout.c
+++ b/trunk/arch/x86/ia32/ia32_aout.c
@@ -34,7 +34,7 @@
#include <asm/ia32.h>
#undef WARN_OLD
-#undef CORE_DUMP /* definitely broken */
+#undef CORE_DUMP /* probably broken */
static int load_aout_binary(struct linux_binprm *, struct pt_regs *regs);
static int load_aout_library(struct file *);
@@ -131,15 +131,21 @@ static void set_brk(unsigned long start, unsigned long end)
* macros to write out all the necessary info.
*/
-#include <linux/coredump.h>
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+ return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
#define DUMP_WRITE(addr, nr) \
if (!dump_write(file, (void *)(addr), (nr))) \
goto end_coredump;
-#define DUMP_SEEK(offset) \
- if (!dump_seek(file, offset)) \
- goto end_coredump;
+#define DUMP_SEEK(offset) \
+ if (file->f_op->llseek) { \
+ if (file->f_op->llseek(file, (offset), 0) != (offset)) \
+ goto end_coredump; \
+ } else \
+ file->f_pos = (offset)
#define START_DATA() (u.u_tsize << PAGE_SHIFT)
#define START_STACK(u) (u.start_stack)
@@ -211,6 +217,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
dump_size = dump.u_ssize << PAGE_SHIFT;
DUMP_WRITE(dump_start, dump_size);
}
+ /*
+ * Finally dump the task struct. Not used by gdb, but
+ * could be useful
+ */
+ set_fs(KERNEL_DS);
+ DUMP_WRITE(current, sizeof(*current));
end_coredump:
set_fs(fs);
return has_dumped;
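The DUMP_SEEK() macro restored above prefers the file's own llseek() when one exists and otherwise just advances f_pos by hand (e.g. when dumping to a pipe). The same logic expressed as a function, for clarity; a sketch, not the driver's actual helper:

#include <linux/fs.h>

/* Returns 1 on success, 0 if the dump should be aborted. */
static int example_dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		/* third argument 0 == SEEK_SET */
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else {
		file->f_pos = off;	/* no llseek: move the position by hand */
	}
	return 1;
}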
diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S
index 518bb99c3394..b86feabed69b 100644
--- a/trunk/arch/x86/ia32/ia32entry.S
+++ b/trunk/arch/x86/ia32/ia32entry.S
@@ -50,12 +50,7 @@
/*
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
- * the %rax value we should see. Instead, we just truncate that
- * value to 32 bits again as we did on entry from user mode.
- * If it's a new value set by user_regset during entry tracing,
- * this matches the normal truncation of the user-mode value.
- * If it's -1 to make us punt the syscall, then (u32)-1 is still
- * an appropriately invalid value.
+ * the value it wants us to use in the table lookup.
*/
.macro LOAD_ARGS32 offset, _r9=0
.if \_r9
@@ -65,7 +60,6 @@
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
- movl %eax,%eax /* zero extension */
.endm
.macro CFI_STARTPROC32 simple
@@ -159,7 +153,7 @@ ENTRY(ia32_sysenter_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja ia32_badsys
sysenter_do_call:
IA32_ARG_FIXUP
@@ -201,7 +195,7 @@ sysexit_from_sys_call:
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja ia32_badsys
movl %ebx,%edi /* reload 1st syscall arg */
movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -254,7 +248,7 @@ sysenter_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
jmp sysenter_do_call
CFI_ENDPROC
@@ -320,7 +314,7 @@ ENTRY(ia32_cstar_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
- cmpq $IA32_NR_syscalls-1,%rax
+ cmpl $IA32_NR_syscalls-1,%eax
ja ia32_badsys
cstar_do_call:
IA32_ARG_FIXUP 1
@@ -373,7 +367,7 @@ cstar_tracesys:
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
RESTORE_REST
xchgl %ebp,%r9d
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
jmp cstar_do_call
END(ia32_cstar_target)
@@ -431,7 +425,7 @@ ENTRY(ia32_syscall)
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz ia32_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja ia32_badsys
ia32_do_call:
IA32_ARG_FIXUP
@@ -450,7 +444,7 @@ ia32_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
+ cmpl $(IA32_NR_syscalls-1),%eax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
jmp ia32_do_call
END(ia32_syscall)
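The cmpl forms restored above are safe because writing a 32-bit register implicitly zero-extends into the full 64-bit register, which is also what the dropped "movl %eax,%eax" relied on. A small user-space C illustration of that truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* What "movl %eax,%eax" leaves behind: the high 32 bits are
	 * cleared, so a traced syscall number of (u32)-1 is still an
	 * out-of-range value, not 0xffffffffffffffff. */
	uint64_t rax = (uint32_t)-1;

	printf("%#llx\n", (unsigned long long)rax);	/* prints 0xffffffff */
	return 0;
}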
diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h
index 76561d20ea2f..bc6abb7bc7ee 100644
--- a/trunk/arch/x86/include/asm/alternative.h
+++ b/trunk/arch/x86/include/asm/alternative.h
@@ -4,7 +4,6 @@
#include
#include
#include
-#include
#include
/*
@@ -161,8 +160,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
-
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
@@ -183,12 +180,4 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-#define IDEAL_NOP_SIZE_5 5
-extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
-extern void arch_init_ideal_nop5(void);
-#else
-static inline void arch_init_ideal_nop5(void) {}
-#endif
-
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/trunk/arch/x86/include/asm/amd_iommu.h b/trunk/arch/x86/include/asm/amd_iommu.h
index f16a2caca1e0..5af2982133b5 100644
--- a/trunk/arch/x86/include/asm/amd_iommu.h
+++ b/trunk/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
* Author: Joerg Roedel
* Leo Duran
*
diff --git a/trunk/arch/x86/include/asm/amd_iommu_proto.h b/trunk/arch/x86/include/asm/amd_iommu_proto.h
index 916bc8111a01..d2544f1d705d 100644
--- a/trunk/arch/x86/include/asm/amd_iommu_proto.h
+++ b/trunk/arch/x86/include/asm/amd_iommu_proto.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Advanced Micro Devices, Inc.
* Author: Joerg Roedel
*
* This program is free software; you can redistribute it and/or modify it
@@ -38,10 +38,4 @@ static inline void amd_iommu_stats_init(void) { }
#endif /* !CONFIG_AMD_IOMMU_STATS */
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
- return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
- (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h
index e3509fc303bf..7014e88bc779 100644
--- a/trunk/arch/x86/include/asm/amd_iommu_types.h
+++ b/trunk/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
* Author: Joerg Roedel
* Leo Duran
*
@@ -368,9 +368,6 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
- /* flags read from acpi table */
- u8 acpi_flags;
-
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -414,24 +411,6 @@ struct amd_iommu {
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
-
- /*
- * We can't rely on the BIOS to restore all values on reinit, so we
- * need to stash them
- */
-
- /* The iommu BAR */
- u32 stored_addr_lo;
- u32 stored_addr_hi;
-
- /*
- * Each iommu has 6 l1s, each of which is documented as having 0x12
- * registers
- */
- u32 stored_l1[6][0x12];
-
- /* The l2 indirect registers */
- u32 stored_l2[0x83];
};
/*
diff --git a/trunk/arch/x86/include/asm/apb_timer.h b/trunk/arch/x86/include/asm/apb_timer.h
index 2fefa501d3ba..a69b1ac9eaf8 100644
--- a/trunk/arch/x86/include/asm/apb_timer.h
+++ b/trunk/arch/x86/include/asm/apb_timer.h
@@ -54,6 +54,7 @@ extern struct clock_event_device *global_clock_event;
extern unsigned long apbt_quick_calibrate(void);
extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
extern void apbt_setup_secondary_clock(void);
+extern unsigned int boot_cpu_id;
extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
diff --git a/trunk/arch/x86/include/asm/bitops.h b/trunk/arch/x86/include/asm/bitops.h
index bafd80defa43..545776efeb16 100644
--- a/trunk/arch/x86/include/asm/bitops.h
+++ b/trunk/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
- (addr[nr / BITS_PER_LONG])) != 0;
+ (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
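constant_test_bit() splits the bit index into a word index (nr / BITS_PER_LONG) and a bit position (nr % BITS_PER_LONG); the cast added above only drops the volatile qualifier on the compile-time-constant path. A user-space sketch of the same decomposition:

#include <limits.h>

#define BITS_PER_LONG ((int)(sizeof(long) * CHAR_BIT))

/* Same word/bit split as constant_test_bit() above. */
static int test_bit_sketch(unsigned int nr, const unsigned long *addr)
{
	return ((1UL << (nr % BITS_PER_LONG)) &
		addr[nr / BITS_PER_LONG]) != 0;
}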
diff --git a/trunk/arch/x86/include/asm/compat.h b/trunk/arch/x86/include/asm/compat.h
index 1d9cd27c2920..306160e58b48 100644
--- a/trunk/arch/x86/include/asm/compat.h
+++ b/trunk/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-static inline void __user *arch_compat_alloc_user_space(long len)
+static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
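compat_alloc_user_space() hands back user-addressable scratch memory immediately below the user stack pointer saved in pt_regs. A hedged sketch of a typical caller; struct foo and do_native_foo() are hypothetical:

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct foo { u32 x; };					/* hypothetical payload */
extern long do_native_foo(struct foo __user *arg);	/* hypothetical */

static long compat_foo(struct foo *native)
{
	/* Stage the structure in user-accessible memory just below the
	 * saved user stack pointer, then hand it to the native path. */
	struct foo __user *ufoo = compat_alloc_user_space(sizeof(*ufoo));

	if (copy_to_user(ufoo, native, sizeof(*ufoo)))
		return -EFAULT;
	return do_native_foo(ufoo);
}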
diff --git a/trunk/arch/x86/include/asm/cpu.h b/trunk/arch/x86/include/asm/cpu.h
index 4fab24de26b1..b185091bf19c 100644
--- a/trunk/arch/x86/include/asm/cpu.h
+++ b/trunk/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@ extern void arch_unregister_cpu(int);
DECLARE_PER_CPU(int, cpu_state);
+extern unsigned int boot_cpu_id;
#endif /* _ASM_X86_CPU_H */
diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h
index 220e2ea08e80..781a50b29a49 100644
--- a/trunk/arch/x86/include/asm/cpufeature.h
+++ b/trunk/arch/x86/include/asm/cpufeature.h
@@ -152,14 +152,10 @@
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -172,7 +168,6 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -184,13 +179,6 @@
#define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
-
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@ -308,7 +296,6 @@ extern const char * const x86_power_flags[32];
#endif /* CONFIG_X86_64 */
-#if __GNUC__ >= 4
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
@@ -317,7 +304,7 @@ extern const char * const x86_power_flags[32];
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
-#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
asm goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
@@ -358,6 +345,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
#endif
}
+#if __GNUC__ >= 4
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
diff --git a/trunk/arch/x86/include/asm/dwarf2.h b/trunk/arch/x86/include/asm/dwarf2.h
index 326099199318..733f7e91e7a9 100644
--- a/trunk/arch/x86/include/asm/dwarf2.h
+++ b/trunk/arch/x86/include/asm/dwarf2.h
@@ -89,16 +89,6 @@
CFI_ADJUST_CFA_OFFSET -8
.endm
- .macro pushfq_cfi
- pushfq
- CFI_ADJUST_CFA_OFFSET 8
- .endm
-
- .macro popfq_cfi
- popfq
- CFI_ADJUST_CFA_OFFSET -8
- .endm
-
.macro movq_cfi reg offset=0
movq %\reg, \offset(%rsp)
CFI_REL_OFFSET \reg, \offset
@@ -119,16 +109,6 @@
CFI_ADJUST_CFA_OFFSET -4
.endm
- .macro pushfl_cfi
- pushfl
- CFI_ADJUST_CFA_OFFSET 4
- .endm
-
- .macro popfl_cfi
- popfl
- CFI_ADJUST_CFA_OFFSET -4
- .endm
-
.macro movl_cfi reg offset=0
movl %\reg, \offset(%esp)
CFI_REL_OFFSET \reg, \offset
diff --git a/trunk/arch/x86/include/asm/entry_arch.h b/trunk/arch/x86/include/asm/entry_arch.h
index b8e96a18676b..8e8ec663a98f 100644
--- a/trunk/arch/x86/include/asm/entry_arch.h
+++ b/trunk/arch/x86/include/asm/entry_arch.h
@@ -49,8 +49,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#ifdef CONFIG_IRQ_WORK
-BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
+#ifdef CONFIG_PERF_EVENTS
+BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/trunk/arch/x86/include/asm/gart.h b/trunk/arch/x86/include/asm/gart.h
index bf357f9b25f0..4ac5b0f33fc1 100644
--- a/trunk/arch/x86/include/asm/gart.h
+++ b/trunk/arch/x86/include/asm/gart.h
@@ -17,7 +17,6 @@ extern int fix_aperture;
#define GARTEN (1<<0)
#define DISGARTCPU (1<<4)
#define DISGARTIO (1<<5)
-#define DISTLBWALKPRB (1<<6)
/* GART cache control register bits. */
#define INVGART (1<<0)
@@ -28,6 +27,7 @@ extern int fix_aperture;
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
+#define AMD64_GARTEN (1<<0)
#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
@@ -57,19 +57,6 @@ static inline void gart_iommu_hole_init(void)
extern int agp_amd64_init(void);
-static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
-{
- u32 ctl;
-
- /*
- * Don't enable translation but enable GART IO and CPU accesses.
- * Also, set DISTLBWALKPRB since GART tables memory is UC.
- */
- ctl = DISTLBWALKPRB | order << 1;
-
- pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
-}
-
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
diff --git a/trunk/arch/x86/include/asm/hardirq.h b/trunk/arch/x86/include/asm/hardirq.h
index 55e4de613f0e..aeab29aee617 100644
--- a/trunk/arch/x86/include/asm/hardirq.h
+++ b/trunk/arch/x86/include/asm/hardirq.h
@@ -14,7 +14,7 @@ typedef struct {
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
- unsigned int apic_irq_work_irqs;
+ unsigned int apic_pending_irqs;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
diff --git a/trunk/arch/x86/include/asm/hpet.h b/trunk/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..004e6e25e913 100644
--- a/trunk/arch/x86/include/asm/hpet.h
+++ b/trunk/arch/x86/include/asm/hpet.h
@@ -68,6 +68,7 @@ extern unsigned long force_hpet_address;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
+extern u8 hpet_readback_cmp;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
diff --git a/trunk/arch/x86/include/asm/hw_breakpoint.h b/trunk/arch/x86/include/asm/hw_breakpoint.h
index 824ca07860d0..528a11e8d3e3 100644
--- a/trunk/arch/x86/include/asm/hw_breakpoint.h
+++ b/trunk/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
#include
/* Available HW breakpoint length encodings */
-#define X86_BREAKPOINT_LEN_X 0x40
+#define X86_BREAKPOINT_LEN_X 0x00
#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c
diff --git a/trunk/arch/x86/include/asm/hw_irq.h b/trunk/arch/x86/include/asm/hw_irq.h
index 3a54a1ca1a02..46c0fe05f230 100644
--- a/trunk/arch/x86/include/asm/hw_irq.h
+++ b/trunk/arch/x86/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
extern void apic_timer_interrupt(void);
extern void x86_platform_ipi(void);
extern void error_interrupt(void);
-extern void irq_work_interrupt(void);
+extern void perf_pending_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
diff --git a/trunk/arch/x86/include/asm/i387.h b/trunk/arch/x86/include/asm/i387.h
index 4aa2bb3b242a..a73a8d5a5e69 100644
--- a/trunk/arch/x86/include/asm/i387.h
+++ b/trunk/arch/x86/include/asm/i387.h
@@ -55,12 +55,6 @@ extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
#define X87_FSW_ES (1 << 7) /* Exception Summary */
static __always_inline __pure bool use_xsaveopt(void)
@@ -73,11 +67,6 @@ static __always_inline __pure bool use_xsave(void)
return static_cpu_has(X86_FEATURE_XSAVE);
}
-static __always_inline __pure bool use_fxsr(void)
-{
- return static_cpu_has(X86_FEATURE_FXSR);
-}
-
extern void __sanitize_i387_state(struct task_struct *);
static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -88,11 +77,19 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
}
#ifdef CONFIG_X86_64
+
+/* Ignore delayed exceptions from user space */
+static inline void tolerant_fwait(void)
+{
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+}
+
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
int err;
- /* See comment in fxsave() below. */
asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
@@ -101,10 +98,44 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [err] "=r" (err)
- : [fx] "R" (fx), "m" (*fx), "0" (0));
+#if 0 /* See comment in fxsave() below. */
+ : [fx] "r" (fx), "m" (*fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
+#endif
return err;
}
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. The kernel data segment can sometimes be 0 and sometimes
+ the new user value. Both should be ok.
+ Use the PDA as a safe address because it should already be in L1. */
+static inline void fpu_clear(struct fpu *fpu)
+{
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
+
+ /*
+ * xsave header may indicate the init state of the FP.
+ */
+ if (use_xsave() &&
+ !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+ return;
+
+ if (unlikely(fx->swd & X87_FSW_ES))
+ asm volatile("fnclex");
+ alternative_input(ASM_NOP8 ASM_NOP2,
+ " emms\n" /* clear stack tags */
+ " fildl %%gs:0", /* load to clear state */
+ X86_FEATURE_FXSAVE_LEAK);
+}
+
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+ fpu_clear(&tsk->thread.fpu);
+}
+
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -118,7 +149,6 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
if (unlikely(err))
return -EFAULT;
- /* See comment in fxsave() below. */
asm volatile("1: rex64/fxsave (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
@@ -127,7 +157,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [err] "=r" (err), "=m" (*fx)
- : [fx] "R" (fx), "0" (0));
+#if 0 /* See comment in fxsave() below. */
+ : [fx] "r" (fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "0" (0));
+#endif
if (unlikely(err) &&
__clear_user(fx, sizeof(struct i387_fxsave_struct)))
err = -EFAULT;
@@ -141,29 +175,56 @@ static inline void fpu_fxsave(struct fpu *fpu)
uses any extended registers for addressing, a second REX prefix
will be generated (to the assembler, rex64 followed by semicolon
is a separate instruction), and hence the 64-bitness is lost. */
-
-#ifdef CONFIG_AS_FXSAVEQ
+#if 0
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
: "=m" (fpu->state->fxsave));
-#else
+#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
- needed for addressing (fix submitted to mainline 2005-11-21).
- asm volatile("rex64/fxsave %0"
- : "=m" (fpu->state->fxsave));
- This, however, we can work around by forcing the compiler to select
+ needed for addressing (fix submitted to mainline 2005-11-21). */
+ __asm__ __volatile__("rex64/fxsave %0"
+ : "=m" (fpu->state->fxsave));
+#else
+ /* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
- asm volatile("rex64/fxsave (%[fx])"
- : "=m" (fpu->state->fxsave)
- : [fx] "R" (&fpu->state->fxsave));
+ __asm__ __volatile__("rex64/fxsave (%1)"
+ : "=m" (fpu->state->fxsave)
+ : "cdaSDb" (&fpu->state->fxsave));
#endif
}
+static inline void fpu_save_init(struct fpu *fpu)
+{
+ if (use_xsave())
+ fpu_xsave(fpu);
+ else
+ fpu_fxsave(fpu);
+
+ fpu_clear(fpu);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
+}
+
#else /* CONFIG_X86_32 */
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
+static inline void tolerant_fwait(void)
+{
+ asm volatile("fnclex ; fwait");
+}
+
/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
@@ -180,14 +241,6 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
return 0;
}
-static inline void fpu_fxsave(struct fpu *fpu)
-{
- asm volatile("fxsave %[fx]"
- : [fx] "=m" (fpu->state->fxsave));
-}
-
-#endif /* CONFIG_X86_64 */
-
/* We need a safe address that is cheap to find and that is already
in L1 during context switch. The best choices are unfortunately
different for UP and SMP */
@@ -203,33 +256,47 @@ static inline void fpu_fxsave(struct fpu *fpu)
static inline void fpu_save_init(struct fpu *fpu)
{
if (use_xsave()) {
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
+
fpu_xsave(fpu);
/*
* xsave header may indicate the init state of the FP.
*/
- if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
- return;
- } else if (use_fxsr()) {
- fpu_fxsave(fpu);
- } else {
- asm volatile("fsave %[fx]; fwait"
- : [fx] "=m" (fpu->state->fsave));
- return;
- }
+ if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+ goto end;
- if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
- asm volatile("fnclex");
+ if (unlikely(fx->swd & X87_FSW_ES))
+ asm volatile("fnclex");
+ /*
+ * we can do a simple return here or be paranoid :)
+ */
+ goto clear_state;
+ }
+
+ /* Use more nops than strictly needed in case the compiler
+ varies code */
+ alternative_input(
+ "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
+ "fxsave %[fx]\n"
+ "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
+ X86_FEATURE_FXSR,
+ [fx] "m" (fpu->state->fxsave),
+ [fsw] "m" (fpu->state->fxsave.swd) : "memory");
+clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */
alternative_input(
- ASM_NOP8 ASM_NOP2,
+ GENERIC_NOP8 GENERIC_NOP2,
"emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
+ "fildl %[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
+end:
+ ;
}
static inline void __save_init_fpu(struct task_struct *tsk)
@@ -238,6 +305,9 @@ static inline void __save_init_fpu(struct task_struct *tsk)
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
+
+#endif /* CONFIG_X86_64 */
+
static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
return fxrstor_checking(&fpu->state->fxsave);
@@ -274,10 +344,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk)
static inline void __clear_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
- /* Ignore delayed exceptions from user space */
- asm volatile("1: fwait\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b));
+ tolerant_fwait();
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
@@ -338,6 +405,19 @@ static inline void irq_ts_restore(int TS_state)
stts();
}
+#ifdef CONFIG_X86_64
+
+static inline void save_init_fpu(struct task_struct *tsk)
+{
+ __save_init_fpu(tsk);
+ stts();
+}
+
+#define unlazy_fpu __unlazy_fpu
+#define clear_fpu __clear_fpu
+
+#else /* CONFIG_X86_32 */
+
/*
* These disable preemption on their own and are safe
*/
@@ -363,6 +443,8 @@ static inline void clear_fpu(struct task_struct *tsk)
preempt_enable();
}
+#endif /* CONFIG_X86_64 */
+
/*
* i387 state interaction
*/
@@ -426,4 +508,7 @@ extern void fpu_finit(struct fpu *fpu);
#endif /* __ASSEMBLY__ */
+#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
+#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
+
#endif /* _ASM_X86_I387_H */
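Both fpu_save_init() variants above test the saved status word before issuing fnclex, since the exception-summary bit is rarely set and clearing is not free. The check in isolation, assuming the X87_FSW_ES definition from this header:

/* X87_FSW_ES == (1 << 7): only clear pending x87 exceptions when the
 * exception-summary bit of the saved status word is set. */
static void maybe_clear_x87_exceptions(unsigned short swd)
{
	if (swd & X87_FSW_ES)
		asm volatile("fnclex");
}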
diff --git a/trunk/arch/x86/include/asm/irq_vectors.h b/trunk/arch/x86/include/asm/irq_vectors.h
index 6af0894dafb4..e2ca30092557 100644
--- a/trunk/arch/x86/include/asm/irq_vectors.h
+++ b/trunk/arch/x86/include/asm/irq_vectors.h
@@ -114,9 +114,9 @@
#define X86_PLATFORM_IPI_VECTOR 0xed
/*
- * IRQ work vector:
+ * Performance monitoring pending work vector:
*/
-#define IRQ_WORK_VECTOR 0xec
+#define LOCAL_PENDING_VECTOR 0xec
#define UV_BAU_MESSAGE 0xea
diff --git a/trunk/arch/x86/include/asm/jump_label.h b/trunk/arch/x86/include/asm/jump_label.h
deleted file mode 100644
index f52d42e80585..000000000000
--- a/trunk/arch/x86/include/asm/jump_label.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _ASM_X86_JUMP_LABEL_H
-#define _ASM_X86_JUMP_LABEL_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <asm/nops.h>
-
-#define JUMP_LABEL_NOP_SIZE 5
-
-# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
-
-# define JUMP_LABEL(key, label) \
- do { \
- asm goto("1:" \
- JUMP_LABEL_INITIAL_NOP \
- ".pushsection __jump_table, \"a\" \n\t"\
- _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
- ".popsection \n\t" \
- : : "i" (key) : : label); \
- } while (0)
-
-#endif /* __KERNEL__ */
-
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
-#endif
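The header deleted above wired asm goto into a __jump_table section: the inline asm emits a 5-byte nop and records (address, target, key) so the nop can later be patched into a jump. A hedged sketch of how callers of that era consumed JUMP_LABEL(); trace_key and trace_hit() are hypothetical:

extern void trace_hit(void);	/* hypothetical slow path */

static char trace_key;		/* its address acts as the enable handle */

static void maybe_trace(void)
{
	JUMP_LABEL(&trace_key, do_trace);
	return;			/* fast path: the nop falls through */
do_trace:
	trace_hit();		/* taken only once the key is patched in */
}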
diff --git a/trunk/arch/x86/include/asm/amd_nb.h b/trunk/arch/x86/include/asm/k8.h
similarity index 61%
rename from trunk/arch/x86/include/asm/amd_nb.h
rename to trunk/arch/x86/include/asm/k8.h
index c8517f81b21e..af00bd1d2089 100644
--- a/trunk/arch/x86/include/asm/amd_nb.h
+++ b/trunk/arch/x86/include/asm/k8.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_AMD_NB_H
-#define _ASM_X86_AMD_NB_H
+#ifndef _ASM_X86_K8_H
+#define _ASM_X86_K8_H
#include <linux/pci.h>
@@ -7,27 +7,24 @@ extern struct pci_device_id k8_nb_ids[];
struct bootnode;
extern int early_is_k8_nb(u32 value);
+extern struct pci_dev **k8_northbridges;
+extern int num_k8_northbridges;
extern int cache_k8_northbridges(void);
extern void k8_flush_garts(void);
extern int k8_get_nodes(struct bootnode *nodes);
extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
-struct k8_northbridge_info {
- u16 num;
- u8 gart_supported;
- struct pci_dev **nb_misc;
-};
-extern struct k8_northbridge_info k8_northbridges;
-
-#ifdef CONFIG_AMD_NB
+#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
- return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
+ return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
#else
+#define num_k8_northbridges 0
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
@@ -36,4 +33,4 @@ static inline struct pci_dev *node_to_k8_nb_misc(int node)
#endif
-#endif /* _ASM_X86_AMD_NB_H */
+#endif /* _ASM_X86_K8_H */
diff --git a/trunk/arch/x86/include/asm/kvm_host.h b/trunk/arch/x86/include/asm/kvm_host.h
index c52e2eb40a1e..502e53f999cf 100644
--- a/trunk/arch/x86/include/asm/kvm_host.h
+++ b/trunk/arch/x86/include/asm/kvm_host.h
@@ -652,6 +652,20 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
return (struct kvm_mmu_page *)page_private(page);
}
+static inline u16 kvm_read_fs(void)
+{
+ u16 seg;
+ asm("mov %%fs, %0" : "=g"(seg));
+ return seg;
+}
+
+static inline u16 kvm_read_gs(void)
+{
+ u16 seg;
+ asm("mov %%gs, %0" : "=g"(seg));
+ return seg;
+}
+
static inline u16 kvm_read_ldt(void)
{
u16 ldt;
@@ -659,6 +673,16 @@ static inline u16 kvm_read_ldt(void)
return ldt;
}
+static inline void kvm_load_fs(u16 sel)
+{
+ asm("mov %0, %%fs" : : "rm"(sel));
+}
+
+static inline void kvm_load_gs(u16 sel)
+{
+ asm("mov %0, %%gs" : : "rm"(sel));
+}
+
static inline void kvm_load_ldt(u16 sel)
{
asm("lldt %0" : : "rm"(sel));
diff --git a/trunk/arch/x86/include/asm/mwait.h b/trunk/arch/x86/include/asm/mwait.h
new file mode 100644
index 000000000000..bcdff997668c
--- /dev/null
+++ b/trunk/arch/x86/include/asm/mwait.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_X86_MWAIT_H
+#define _ASM_X86_MWAIT_H
+
+#define MWAIT_SUBSTATE_MASK 0xf
+#define MWAIT_CSTATE_MASK 0xf
+#define MWAIT_SUBSTATE_SIZE 4
+#define MWAIT_MAX_NUM_CSTATES 8
+
+#define CPUID_MWAIT_LEAF 5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+#define CPUID5_ECX_INTERRUPT_BREAK 0x2
+
+#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+
+#endif /* _ASM_X86_MWAIT_H */
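The new asm/mwait.h gathers the CPUID leaf 5 constants that cstate.c previously defined privately. A hedged sketch of how an ACPI-provided MWAIT hint decomposes with these masks (high nibble C-state, low nibble sub-state):

static unsigned int mwait_cstate(unsigned int hint)
{
	return (hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;
}

static unsigned int mwait_substate(unsigned int hint)
{
	return hint & MWAIT_SUBSTATE_MASK;
}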
diff --git a/trunk/arch/x86/include/asm/perf_event_p4.h b/trunk/arch/x86/include/asm/perf_event_p4.h
index a70cd216be5d..def500776b16 100644
--- a/trunk/arch/x86/include/asm/perf_event_p4.h
+++ b/trunk/arch/x86/include/asm/perf_event_p4.h
@@ -36,6 +36,19 @@
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
+/* Non HT mask */
+#define P4_ESCR_MASK \
+ (P4_ESCR_EVENT_MASK | \
+ P4_ESCR_EVENTMASK_MASK | \
+ P4_ESCR_TAG_MASK | \
+ P4_ESCR_TAG_ENABLE | \
+ P4_ESCR_T0_OS | \
+ P4_ESCR_T0_USR)
+
+/* HT mask */
+#define P4_ESCR_MASK_HT \
+ (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
+
#define P4_CCCR_OVF 0x80000000U
#define P4_CCCR_CASCADE 0x40000000U
#define P4_CCCR_OVF_PMI_T0 0x04000000U
@@ -57,6 +70,23 @@
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
+/* Non HT mask */
+#define P4_CCCR_MASK \
+ (P4_CCCR_OVF | \
+ P4_CCCR_CASCADE | \
+ P4_CCCR_OVF_PMI_T0 | \
+ P4_CCCR_FORCE_OVF | \
+ P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | \
+ P4_CCCR_ESCR_SELECT_MASK | \
+ P4_CCCR_ENABLE)
+
+/* HT mask */
+#define P4_CCCR_MASK_HT \
+ (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
+
#define P4_GEN_ESCR_EMASK(class, name, bit) \
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name) class##__##name
@@ -97,28 +127,6 @@
#define P4_CONFIG_HT_SHIFT 63
#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
-/*
- * The bits we allow to pass for RAW events
- */
-#define P4_CONFIG_MASK_ESCR \
- P4_ESCR_EVENT_MASK | \
- P4_ESCR_EVENTMASK_MASK | \
- P4_ESCR_TAG_MASK | \
- P4_ESCR_TAG_ENABLE
-
-#define P4_CONFIG_MASK_CCCR \
- P4_CCCR_EDGE | \
- P4_CCCR_THRESHOLD_MASK | \
- P4_CCCR_COMPLEMENT | \
- P4_CCCR_COMPARE | \
- P4_CCCR_THREAD_ANY | \
- P4_CCCR_RESERVED
-
-/* some dangerous bits are reserved for kernel internals */
-#define P4_CONFIG_MASK \
- (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
- (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
-
static inline bool p4_is_event_cascaded(u64 config)
{
u32 cccr = p4_config_unpack_cccr(config);
diff --git a/trunk/arch/x86/include/asm/processor.h b/trunk/arch/x86/include/asm/processor.h
index a668d66301fe..325b7bdbebaa 100644
--- a/trunk/arch/x86/include/asm/processor.h
+++ b/trunk/arch/x86/include/asm/processor.h
@@ -110,8 +110,6 @@ struct cpuinfo_x86 {
u16 phys_proc_id;
/* Core id: */
u16 cpu_core_id;
- /* Compute unit id */
- u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
#endif
@@ -604,7 +602,7 @@ extern unsigned long mmu_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
- unsigned long cr4;
+ unsigned cr4;
mmu_cr4_features |= mask;
cr4 = read_cr4();
@@ -614,7 +612,7 @@ static inline void set_in_cr4(unsigned long mask)
static inline void clear_in_cr4(unsigned long mask)
{
- unsigned long cr4;
+ unsigned cr4;
mmu_cr4_features &= ~mask;
cr4 = read_cr4();
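set_in_cr4()/clear_in_cr4() keep the mmu_cr4_features shadow in sync with the live register so secondary CPUs can later be brought up with the same bits; the `unsigned long` on the removed side matches the register's natural width. The read-modify-write pattern in isolation, assuming the read_cr4()/write_cr4() accessors and mmu_cr4_features declaration used in this file:

static void set_in_cr4_sketch(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;	/* keep the software copy current */
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}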
diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile
index f49257e33805..0925676266bd 100644
--- a/trunk/arch/x86/kernel/Makefile
+++ b/trunk/arch/x86/kernel/Makefile
@@ -11,8 +11,6 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
-CFLAGS_REMOVE_pvclock.o = -pg
-CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
@@ -34,8 +32,7 @@ GCOV_PROFILE_paravirt.o := n
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
-obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
-obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-y += setup.o x86_init.o i8259.o irqinit.o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
@@ -90,7 +87,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_APB_TIMER) += apb_timer.o
-obj-$(CONFIG_AMD_NB) += amd_nb.o
+obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
@@ -123,6 +120,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
+ obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
diff --git a/trunk/arch/x86/kernel/acpi/cstate.c b/trunk/arch/x86/kernel/acpi/cstate.c
index fb16f17e59be..bcc4adda7b9b 100644
--- a/trunk/arch/x86/kernel/acpi/cstate.c
+++ b/trunk/arch/x86/kernel/acpi/cstate.c
@@ -13,6 +13,7 @@
#include <acpi/processor.h>
#include <asm/acpi.h>
+#include <asm/mwait.h>
/*
* Initialize bm_flags based on the CPU cache properties
@@ -61,20 +62,10 @@ struct cstate_entry {
unsigned int ecx;
} states[ACPI_PROCESSOR_MAX_POWER];
};
-static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */
+static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
-#define MWAIT_SUBSTATE_MASK (0xf)
-#define MWAIT_CSTATE_MASK (0xf)
-#define MWAIT_SUBSTATE_SIZE (4)
-
-#define CPUID_MWAIT_LEAF (5)
-#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
-#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
-
-#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
-
#define NATIVE_CSTATE_BEYOND_HALT (2)
static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c
index a36bb90aef53..f65ab8b014c4 100644
--- a/trunk/arch/x86/kernel/alternative.c
+++ b/trunk/arch/x86/kernel/alternative.c
@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
-void *text_poke_early(void *addr, const void *opcode, size_t len);
+static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
@@ -522,7 +522,7 @@ void __init alternative_instructions(void)
* instructions. And on the local CPU you need to be protected again NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
+static void *__init_or_module text_poke_early(void *addr, const void *opcode,
size_t len)
{
unsigned long flags;
@@ -637,72 +637,7 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
tpp.len = len;
atomic_set(&stop_machine_first, 1);
wrote_text = 0;
- /* Use __stop_machine() because the caller already got online_cpus. */
- __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
return addr;
}
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
-
-void __init arch_init_ideal_nop5(void)
-{
- extern const unsigned char ftrace_test_p6nop[];
- extern const unsigned char ftrace_test_nop5[];
- extern const unsigned char ftrace_test_jmp[];
- int faulted = 0;
-
- /*
- * There is no good nop for all x86 archs.
- * We will default to using the P6_NOP5, but first we
- * will test to make sure that the nop will actually
- * work on this CPU. If it faults, we will then
- * go to a less efficient 5 byte nop. If that fails
- * we then just use a jmp as our nop. This isn't the most
- * efficient nop, but we can not use a multi part nop
- * since we would then risk being preempted in the middle
- * of that nop, and if we enabled tracing then, it might
- * cause a system crash.
- *
- * TODO: check the cpuid to determine the best nop.
- */
- asm volatile (
- "ftrace_test_jmp:"
- "jmp ftrace_test_p6nop\n"
- "nop\n"
- "nop\n"
- "nop\n" /* 2 byte jmp + 3 bytes */
- "ftrace_test_p6nop:"
- P6_NOP5
- "jmp 1f\n"
- "ftrace_test_nop5:"
- ".byte 0x66,0x66,0x66,0x66,0x90\n"
- "1:"
- ".section .fixup, \"ax\"\n"
- "2: movl $1, %0\n"
- " jmp ftrace_test_nop5\n"
- "3: movl $2, %0\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(ftrace_test_p6nop, 2b)
- _ASM_EXTABLE(ftrace_test_nop5, 3b)
- : "=r"(faulted) : "0" (faulted));
-
- switch (faulted) {
- case 0:
- pr_info("converting mcount calls to 0f 1f 44 00 00\n");
- memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
- break;
- case 1:
- pr_info("converting mcount calls to 66 66 66 66 90\n");
- memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
- break;
- case 2:
- pr_info("converting mcount calls to jmp . + 5\n");
- memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
- break;
- }
-
-}
-#endif
diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c
index d2fdb0826df2..fa044e1e30a2 100644
--- a/trunk/arch/x86/kernel/amd_iommu.c
+++ b/trunk/arch/x86/kernel/amd_iommu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
* Author: Joerg Roedel
* Leo Duran
*
@@ -1953,7 +1953,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
size_t size,
int dir)
{
- dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@@ -1961,7 +1960,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
(dma_addr + size > dma_dom->aperture_size))
return;
- flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -1976,7 +1974,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(&dma_dom->domain, flush_addr, size);
+ iommu_flush_pages(&dma_dom->domain, dma_addr, size);
dma_dom->need_flush = false;
}
}
diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c
index 3cb482e123de..3cc63e2b8dd4 100644
--- a/trunk/arch/x86/kernel/amd_iommu_init.c
+++ b/trunk/arch/x86/kernel/amd_iommu_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
* Author: Joerg Roedel
* Leo Duran
*
@@ -194,39 +194,6 @@ static inline unsigned long tbl_size(int entry_size)
return 1UL << shift;
}
-/* Access to l1 and l2 indexed register spaces */
-
-static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
-{
- u32 val;
-
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
- pci_read_config_dword(iommu->dev, 0xfc, &val);
- return val;
-}
-
-static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
-{
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
- pci_write_config_dword(iommu->dev, 0xfc, val);
- pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
-}
-
-static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
-{
- u32 val;
-
- pci_write_config_dword(iommu->dev, 0xf0, address);
- pci_read_config_dword(iommu->dev, 0xf4, &val);
- return val;
-}
-
-static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
-{
- pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
- pci_write_config_dword(iommu->dev, 0xf4, val);
-}
-
/****************************************************************************
*
* AMD IOMMU MMIO register space handling functions
@@ -652,7 +619,6 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc;
- int i, j;
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
@@ -666,30 +632,6 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
iommu->last_device = calc_devid(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
- if (!is_rd890_iommu(iommu->dev))
- return;
-
- /*
- * Some rd890 systems may not be fully reconfigured by the BIOS, so
- * it's necessary for us to store this information so it can be
- * reprogrammed on resume
- */
-
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
- &iommu->stored_addr_lo);
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
- &iommu->stored_addr_hi);
-
- /* Low bit locks writes to configuration space */
- iommu->stored_addr_lo &= ~1;
-
- for (i = 0; i < 6; i++)
- for (j = 0; j < 0x12; j++)
- iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
- for (i = 0; i < 0x83; i++)
- iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
/*
@@ -707,9 +649,29 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
struct ivhd_entry *e;
/*
- * First save the recommended feature enable bits from ACPI
+ * First set the recommended feature enable bits from ACPI
+ * into the IOMMU control registers
+ */
+ h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ h->flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
*/
- iommu->acpi_flags = h->flags;
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
/*
* Done. Now parse the device entries
@@ -1154,79 +1116,6 @@ static void init_device_table(void)
}
}
-static void iommu_init_flags(struct amd_iommu *iommu)
-{
- iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
- iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
- iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
- iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
- iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
- iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
- iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
- iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
- /*
- * make IOMMU memory accesses cache coherent
- */
- iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
-}
-
-static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
-{
- int i, j;
- u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
-
- /* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
- return;
-
- /*
- * First, we need to ensure that the iommu is enabled. This is
- * controlled by a register in the northbridge
- */
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
-
- /* Select Northbridge indirect register 0x75 and enable writing */
- pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
- pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
-
- /* Enable the iommu */
- if (!(ioc_feature_control & 0x1))
- pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
-
- pci_dev_put(pdev);
-
- /* Restore the iommu BAR */
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
- iommu->stored_addr_lo);
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
- iommu->stored_addr_hi);
-
- /* Restore the l1 indirect regs for each of the 6 l1s */
- for (i = 0; i < 6; i++)
- for (j = 0; j < 0x12; j++)
- iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
-
- /* Restore the l2 indirect regs */
- for (i = 0; i < 0x83; i++)
- iommu_write_l2(iommu, i, iommu->stored_l2[i]);
-
- /* Lock PCI setup registers */
- pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
- iommu->stored_addr_lo | 1);
-}
-
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@@ -1237,7 +1126,6 @@ static void enable_iommus(void)
for_each_iommu(iommu) {
iommu_disable(iommu);
- iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
@@ -1262,11 +1150,6 @@ static void disable_iommus(void)
static int amd_iommu_resume(struct sys_device *dev)
{
- struct amd_iommu *iommu;
-
- for_each_iommu(iommu)
- iommu_apply_resume_quirks(iommu);
-
/* re-load the hardware */
enable_iommus();
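The deleted iommu_read_l1()/iommu_write_l1() helpers above implement a classic index/data pair in PCI config space (0xf8/0xfc for the l1 space, 0xf0/0xf4 for l2): write the address to the index register, then access the data register. The read side of that idiom as a standalone sketch, with the offsets taken from the hunk:

#include <linux/pci.h>

static u32 indexed_read(struct pci_dev *dev, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(dev, 0xf8, address | l1 << 16);	/* index */
	pci_read_config_dword(dev, 0xfc, &val);			/* data */
	return val;
}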
diff --git a/trunk/arch/x86/kernel/apb_timer.c b/trunk/arch/x86/kernel/apb_timer.c
index 6fe2b5cb4f3c..8dd77800ff5d 100644
--- a/trunk/arch/x86/kernel/apb_timer.c
+++ b/trunk/arch/x86/kernel/apb_timer.c
@@ -343,7 +343,7 @@ void apbt_setup_secondary_clock(void)
/* Don't register boot CPU clockevent */
cpu = smp_processor_id();
- if (!cpu)
+ if (cpu == boot_cpu_id)
return;
/*
* We need to calculate the scaled math multiplication factor for
@@ -398,7 +398,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
}
break;
default:
- pr_debug("APBT notified %lu, no action\n", action);
+ pr_debug(KERN_INFO "APBT notified %lu, no action\n", action);
}
return NOTIFY_OK;
}
@@ -552,7 +552,7 @@ static cycle_t apbt_read_clocksource(struct clocksource *cs)
pr_debug("APB CS going back %lx:%lx:%lx ",
t2, last_read, t2 - last_read);
bad_count_x3:
- pr_debug("triple check enforced\n");
+ pr_debug(KERN_INFO "tripple check enforced\n");
t0 = apbt_readl(phy_cs_timer_id,
APBTMR_N_CURRENT_VALUE);
udelay(1);
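The hunks above reinstate pr_debug(KERN_INFO ...), which double-specifies the log level: pr_debug() already expands to printk(KERN_DEBUG ...) (or to nothing when DEBUG is unset), so the extra KERN_INFO ends up as literal "<6>" text inside the message. A corrected sketch:

#include <linux/kernel.h>

static void apbt_log_notify(unsigned long action)
{
	/* pr_debug() supplies its own level; no KERN_* in the format. */
	pr_debug("APBT notified %lu, no action\n", action);
}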
diff --git a/trunk/arch/x86/kernel/aperture_64.c b/trunk/arch/x86/kernel/aperture_64.c
index 377f5db3b8b4..a2e0caf26e17 100644
--- a/trunk/arch/x86/kernel/aperture_64.c
+++ b/trunk/arch/x86/kernel/aperture_64.c
@@ -27,7 +27,7 @@
#include
#include