diff --git a/[refs] b/[refs]
index d83ed1ea9920..11e8703de81d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 025be81e83043f20538dcced1e12c5f8d152fbdb
+refs/heads/master: 4b75679f60d0ce780609cbff249769b669f4fb69
diff --git a/trunk/Documentation/DocBook/libata.tmpl b/trunk/Documentation/DocBook/libata.tmpl
index d260d92089ad..5bcbb6ee3bc0 100644
--- a/trunk/Documentation/DocBook/libata.tmpl
+++ b/trunk/Documentation/DocBook/libata.tmpl
@@ -120,14 +120,27 @@ void (*dev_config) (struct ata_port *, struct ata_device *);
void (*set_piomode) (struct ata_port *, struct ata_device *);
void (*set_dmamode) (struct ata_port *, struct ata_device *);
-void (*post_set_mode) (struct ata_port *ap);
+void (*post_set_mode) (struct ata_port *);
+unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned int);
Hooks called prior to the issue of SET FEATURES - XFER MODE
- command. dev->pio_mode is guaranteed to be valid when
- ->set_piomode() is called, and dev->dma_mode is guaranteed to be
- valid when ->set_dmamode() is called. ->post_set_mode() is
+ command. The optional ->mode_filter() hook is called when libata
+ has built a mask of the possible modes. This is passed to the
+	->mode_filter() function, which should return a mask of valid modes
+ after filtering those unsuitable due to hardware limits. It is not
+ valid to use this interface to add modes.
+
+
+	dev->pio_mode and dev->dma_mode are guaranteed to be valid when
+	->set_piomode() and ->set_dmamode() are called. The timings for
+	any other drive sharing the cable will also be valid at this point.
+	That is, the library records the decisions for the modes of each
+	drive on a channel before it attempts to set any of them.
+
+
+ ->post_set_mode() is
called unconditionally, after the SET FEATURES - XFER MODE
command completes successfully.
@@ -230,6 +243,32 @@ void (*dev_select)(struct ata_port *ap, unsigned int device);
+ Private tuning method
+
+void (*set_mode) (struct ata_port *ap);
+
+
+
+ By default libata performs drive and controller tuning in
+ accordance with the ATA timing rules and also applies blacklists
+ and cable limits. Some controllers need special handling and have
+ custom tuning rules, typically raid controllers that use ATA
+ commands but do not actually do drive timing.
+
+
+
+
+ This hook should not be used to replace the standard controller
+ tuning logic when a controller has quirks. Replacing the default
+ tuning logic in that case would bypass handling for drive and
+ bridge quirks that may be important to data reliability. If a
+ controller needs to filter the mode selection it should use the
+ mode_filter hook instead.
+
+
+
+
+
Reset ATA bus
void (*phy_reset) (struct ata_port *ap);
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 495858b236b6..59d0c74c79c9 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -127,13 +127,6 @@ Who: Christoph Hellwig
---------------------------
-What: EXPORT_SYMBOL(lookup_hash)
-When: January 2006
-Why: Too low-level interface. Use lookup_one_len or lookup_create instead.
-Who: Christoph Hellwig
-
----------------------------
-
What: CONFIG_FORCED_INLINING
When: June 2006
Why: Config option is there to see if gcc is good enough. (in january
@@ -241,3 +234,15 @@ Why: The USB subsystem has changed a lot over time, and it has been
Who: Greg Kroah-Hartman
---------------------------
+
+What: find_trylock_page
+When: January 2007
+Why: The interface no longer has any callers left in the kernel. It
+ is an odd interface (compared with other find_*_page functions), in
+ that it does not take a refcount to the page, only the page lock.
+ It should be replaced with find_get_page or find_lock_page if possible.
+ This feature removal can be reevaluated if users of the interface
+ cannot cleanly use something else.
+Who: Nick Piggin
+
+---------------------------
diff --git a/trunk/Documentation/leds-class.txt b/trunk/Documentation/leds-class.txt
new file mode 100644
index 000000000000..8c35c0426110
--- /dev/null
+++ b/trunk/Documentation/leds-class.txt
@@ -0,0 +1,71 @@
+LED handling under Linux
+========================
+
+If you're reading this and thinking about keyboard leds, these are
+handled by the input subsystem and the led class is *not* needed.
+
+In its simplest form, the LED class just allows control of LEDs from
+userspace. LEDs appear in /sys/class/leds/. The brightness file will
+set the brightness of the LED (taking a value 0-255). Most LEDs don't
+have hardware brightness support so will just be turned on for non-zero
+brightness settings.
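+
+As a purely illustrative sketch of the driver side (the foo* names and the
+parent device pointer are made up, and error handling is omitted), an LED is
+exposed by registering a struct led_classdev with a brightness_set() hook:
+
+	static void foo_led_set(struct led_classdev *led_cdev,
+				enum led_brightness value)
+	{
+		/* most LEDs just treat any non-zero brightness as "on" */
+		foo_hw_set_led(value != 0);
+	}
+
+	static struct led_classdev foo_led = {
+		.name		= "foo:green",
+		.brightness_set	= foo_led_set,
+	};
+
+	/* typically done from the driver's probe/init code */
+	led_classdev_register(parent_dev, &foo_led);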
+
+The class also introduces the optional concept of an LED trigger. A trigger
+is a kernel based source of led events. Triggers can either be simple or
+complex. A simple trigger isn't configurable and is designed to slot into
+existing subsystems with minimal additional code. Examples are the ide-disk,
+nand-disk and sharpsl-charge triggers. With led triggers disabled, the code
+optimises away.
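+
+As a purely illustrative sketch of the kernel side of a simple trigger (the
+foo names are made up), the trigger is registered once and then fired from
+wherever the event of interest is handled:
+
+	static struct led_trigger *foo_trigger;
+
+	led_trigger_register_simple("foo-activity", &foo_trigger);
+
+	/* later, when activity starts and stops */
+	led_trigger_event(foo_trigger, LED_FULL);
+	led_trigger_event(foo_trigger, LED_OFF);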
+
+Complex triggers whilst available to all LEDs have LED specific
+parameters and work on a per LED basis. The timer trigger is an example.
+
+You can change triggers in a similar manner to the way an IO scheduler
+is chosen (via /sys/class/leds/<device>/trigger). Trigger specific
+parameters can appear in /sys/class/leds/<device> once a given trigger is
+selected.
+
+
+Design Philosophy
+=================
+
+The underlying design philosophy is simplicity. LEDs are simple devices
+and the aim is to keep a small amount of code giving as much functionality
+as possible. Please keep this in mind when suggesting enhancements.
+
+
+LED Device Naming
+=================
+
+Is currently of the form:
+
+"devicename:colour"
+
+There have been calls for LED properties such as colour to be exported as
+individual led class attributes. As a solution which doesn't incur as much
+overhead, I suggest these become part of the device name. The naming scheme
+above leaves scope for further attributes should they be needed.
+
+
+Known Issues
+============
+
+The LED Trigger core cannot be a module as the simple trigger functions
+would cause nightmare dependency issues. I see this as a minor issue
+compared to the benefits the simple trigger functionality brings. The
+rest of the LED subsystem can be modular.
+
+Some leds can be programmed to flash in hardware. As this isn't a generic
+LED device property, this should be exported as a device specific sysfs
+attribute rather than part of the class if this functionality is required.
+
+
+Future Development
+==================
+
+At the moment, a trigger can't be created specifically for a single LED.
+There are a number of cases where a trigger might only be mappable to a
+particular LED (ACPI?). The addition of triggers provided by the LED driver
+should cover this option and be possible to add without breaking the
+current interface.
+
diff --git a/trunk/Documentation/memory-barriers.txt b/trunk/Documentation/memory-barriers.txt
new file mode 100644
index 000000000000..f8550310a6d5
--- /dev/null
+++ b/trunk/Documentation/memory-barriers.txt
@@ -0,0 +1,1913 @@
+ ============================
+ LINUX KERNEL MEMORY BARRIERS
+ ============================
+
+By: David Howells <dhowells@redhat.com>
+
+Contents:
+
+ (*) Abstract memory access model.
+
+ - Device operations.
+ - Guarantees.
+
+ (*) What are memory barriers?
+
+ - Varieties of memory barrier.
+ - What may not be assumed about memory barriers?
+ - Data dependency barriers.
+ - Control dependencies.
+ - SMP barrier pairing.
+ - Examples of memory barrier sequences.
+
+ (*) Explicit kernel barriers.
+
+ - Compiler barrier.
+ - The CPU memory barriers.
+ - MMIO write barrier.
+
+ (*) Implicit kernel memory barriers.
+
+ - Locking functions.
+ - Interrupt disabling functions.
+ - Miscellaneous functions.
+
+ (*) Inter-CPU locking barrier effects.
+
+ - Locks vs memory accesses.
+ - Locks vs I/O accesses.
+
+ (*) Where are memory barriers needed?
+
+ - Interprocessor interaction.
+ - Atomic operations.
+ - Accessing devices.
+ - Interrupts.
+
+ (*) Kernel I/O barrier effects.
+
+ (*) Assumed minimum execution ordering model.
+
+ (*) The effects of the cpu cache.
+
+ - Cache coherency.
+ - Cache coherency vs DMA.
+ - Cache coherency vs MMIO.
+
+ (*) The things CPUs get up to.
+
+ - And then there's the Alpha.
+
+ (*) References.
+
+
+============================
+ABSTRACT MEMORY ACCESS MODEL
+============================
+
+Consider the following abstract model of the system:
+
+ : :
+ : :
+ : :
+ +-------+ : +--------+ : +-------+
+ | | : | | : | |
+ | | : | | : | |
+ | CPU 1 |<----->| Memory |<----->| CPU 2 |
+ | | : | | : | |
+ | | : | | : | |
+ +-------+ : +--------+ : +-------+
+ ^ : ^ : ^
+ | : | : |
+ | : | : |
+ | : v : |
+ | : +--------+ : |
+ | : | | : |
+ | : | | : |
+ +---------->| Device |<----------+
+ : | | :
+ : | | :
+ : +--------+ :
+ : :
+
+Each CPU executes a program that generates memory access operations. In the
+abstract CPU, memory operation ordering is very relaxed, and a CPU may actually
+perform the memory operations in any order it likes, provided program causality
+appears to be maintained. Similarly, the compiler may also arrange the
+instructions it emits in any order it likes, provided it doesn't affect the
+apparent operation of the program.
+
+So in the above diagram, the effects of the memory operations performed by a
+CPU are perceived by the rest of the system as the operations cross the
+interface between the CPU and rest of the system (the dotted lines).
+
+
+For example, consider the following sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { A == 1; B == 2 }
+ A = 3; x = A;
+ B = 4; y = B;
+
+The set of accesses as seen by the memory system in the middle can be arranged
+in 24 different combinations:
+
+ STORE A=3, STORE B=4, x=LOAD A->3, y=LOAD B->4
+ STORE A=3, STORE B=4, y=LOAD B->4, x=LOAD A->3
+ STORE A=3, x=LOAD A->3, STORE B=4, y=LOAD B->4
+ STORE A=3, x=LOAD A->3, y=LOAD B->2, STORE B=4
+ STORE A=3, y=LOAD B->2, STORE B=4, x=LOAD A->3
+ STORE A=3, y=LOAD B->2, x=LOAD A->3, STORE B=4
+ STORE B=4, STORE A=3, x=LOAD A->3, y=LOAD B->4
+ STORE B=4, ...
+ ...
+
+and can thus result in four different combinations of values:
+
+ x == 1, y == 2
+ x == 1, y == 4
+ x == 3, y == 2
+ x == 3, y == 4
+
+
+Furthermore, the stores committed by a CPU to the memory system may not be
+perceived by the loads made by another CPU in the same order as the stores were
+committed.
+
+
+As a further example, consider this sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { A == 1, B == 2, C = 3, P == &A, Q == &C }
+ B = 4; Q = P;
+ P = &B D = *Q;
+
+There is an obvious data dependency here, as the value loaded into D depends on
+the address retrieved from P by CPU 2. At the end of the sequence, any of the
+following results are possible:
+
+ (Q == &A) and (D == 1)
+ (Q == &B) and (D == 2)
+ (Q == &B) and (D == 4)
+
+Note that CPU 2 will never try and load C into D because the CPU will load P
+into Q before issuing the load of *Q.
+
+
+DEVICE OPERATIONS
+-----------------
+
+Some devices present their control interfaces as collections of memory
+locations, but the order in which the control registers are accessed is very
+important. For instance, imagine an ethernet card with a set of internal
+registers that are accessed through an address port register (A) and a data
+port register (D). To read internal register 5, the following code might then
+be used:
+
+ *A = 5;
+ x = *D;
+
+but this might show up as either of the following two sequences:
+
+ STORE *A = 5, x = LOAD *D
+ x = LOAD *D, STORE *A = 5
+
+the second of which will almost certainly result in a malfunction, since it set
+the address _after_ attempting to read the register.
+
+
+GUARANTEES
+----------
+
+There are some minimal guarantees that may be expected of a CPU:
+
+ (*) On any given CPU, dependent memory accesses will be issued in order, with
+ respect to itself. This means that for:
+
+ Q = P; D = *Q;
+
+ the CPU will issue the following memory operations:
+
+ Q = LOAD P, D = LOAD *Q
+
+ and always in that order.
+
+ (*) Overlapping loads and stores within a particular CPU will appear to be
+ ordered within that CPU. This means that for:
+
+ a = *X; *X = b;
+
+ the CPU will only issue the following sequence of memory operations:
+
+ a = LOAD *X, STORE *X = b
+
+ And for:
+
+ *X = c; d = *X;
+
+ the CPU will only issue:
+
+ STORE *X = c, d = LOAD *X
+
+     (Loads and stores overlap if they are targeted at overlapping pieces of
+ memory).
+
+And there are a number of things that _must_ or _must_not_ be assumed:
+
+ (*) It _must_not_ be assumed that independent loads and stores will be issued
+ in the order given. This means that for:
+
+ X = *A; Y = *B; *D = Z;
+
+ we may get any of the following sequences:
+
+ X = LOAD *A, Y = LOAD *B, STORE *D = Z
+ X = LOAD *A, STORE *D = Z, Y = LOAD *B
+ Y = LOAD *B, X = LOAD *A, STORE *D = Z
+ Y = LOAD *B, STORE *D = Z, X = LOAD *A
+ STORE *D = Z, X = LOAD *A, Y = LOAD *B
+ STORE *D = Z, Y = LOAD *B, X = LOAD *A
+
+ (*) It _must_ be assumed that overlapping memory accesses may be merged or
+ discarded. This means that for:
+
+ X = *A; Y = *(A + 4);
+
+ we may get any one of the following sequences:
+
+ X = LOAD *A; Y = LOAD *(A + 4);
+ Y = LOAD *(A + 4); X = LOAD *A;
+ {X, Y} = LOAD {*A, *(A + 4) };
+
+ And for:
+
+ *A = X; Y = *A;
+
+ we may get either of:
+
+ STORE *A = X; Y = LOAD *A;
+	STORE *A = Y = X;
+
+
+=========================
+WHAT ARE MEMORY BARRIERS?
+=========================
+
+As can be seen above, independent memory operations are effectively performed
+in random order, but this can be a problem for CPU-CPU interaction and for I/O.
+What is required is some way of intervening to instruct the compiler and the
+CPU to restrict the order.
+
+Memory barriers are such interventions. They impose a perceived partial
+ordering between the memory operations specified on either side of the barrier.
+They request that the sequence of memory events generated appears to other
+parts of the system as if the barrier is effective on that CPU.
+
+
+VARIETIES OF MEMORY BARRIER
+---------------------------
+
+Memory barriers come in four basic varieties:
+
+ (1) Write (or store) memory barriers.
+
+ A write memory barrier gives a guarantee that all the STORE operations
+ specified before the barrier will appear to happen before all the STORE
+ operations specified after the barrier with respect to the other
+ components of the system.
+
+ A write barrier is a partial ordering on stores only; it is not required
+ to have any effect on loads.
+
+     A CPU can be viewed as committing a sequence of store operations to the
+ memory system as time progresses. All stores before a write barrier will
+ occur in the sequence _before_ all the stores after the write barrier.
+
+ [!] Note that write barriers should normally be paired with read or data
+ dependency barriers; see the "SMP barrier pairing" subsection.
+
+
+ (2) Data dependency barriers.
+
+ A data dependency barrier is a weaker form of read barrier. In the case
+ where two loads are performed such that the second depends on the result
+ of the first (eg: the first load retrieves the address to which the second
+ load will be directed), a data dependency barrier would be required to
+ make sure that the target of the second load is updated before the address
+ obtained by the first load is accessed.
+
+ A data dependency barrier is a partial ordering on interdependent loads
+ only; it is not required to have any effect on stores, independent loads
+ or overlapping loads.
+
+ As mentioned in (1), the other CPUs in the system can be viewed as
+ committing sequences of stores to the memory system that the CPU being
+ considered can then perceive. A data dependency barrier issued by the CPU
+ under consideration guarantees that for any load preceding it, if that
+ load touches one of a sequence of stores from another CPU, then by the
+ time the barrier completes, the effects of all the stores prior to that
+ touched by the load will be perceptible to any loads issued after the data
+ dependency barrier.
+
+ See the "Examples of memory barrier sequences" subsection for diagrams
+ showing the ordering constraints.
+
+ [!] Note that the first load really has to have a _data_ dependency and
+ not a control dependency. If the address for the second load is dependent
+ on the first load, but the dependency is through a conditional rather than
+ actually loading the address itself, then it's a _control_ dependency and
+ a full read barrier or better is required. See the "Control dependencies"
+ subsection for more information.
+
+ [!] Note that data dependency barriers should normally be paired with
+ write barriers; see the "SMP barrier pairing" subsection.
+
+
+ (3) Read (or load) memory barriers.
+
+ A read barrier is a data dependency barrier plus a guarantee that all the
+ LOAD operations specified before the barrier will appear to happen before
+ all the LOAD operations specified after the barrier with respect to the
+ other components of the system.
+
+ A read barrier is a partial ordering on loads only; it is not required to
+ have any effect on stores.
+
+ Read memory barriers imply data dependency barriers, and so can substitute
+ for them.
+
+ [!] Note that read barriers should normally be paired with write barriers;
+ see the "SMP barrier pairing" subsection.
+
+
+ (4) General memory barriers.
+
+ A general memory barrier is a combination of both a read memory barrier
+ and a write memory barrier. It is a partial ordering over both loads and
+ stores.
+
+ General memory barriers imply both read and write memory barriers, and so
+ can substitute for either.
+
+
+And a couple of implicit varieties:
+
+ (5) LOCK operations.
+
+ This acts as a one-way permeable barrier. It guarantees that all memory
+ operations after the LOCK operation will appear to happen after the LOCK
+ operation with respect to the other components of the system.
+
+ Memory operations that occur before a LOCK operation may appear to happen
+ after it completes.
+
+ A LOCK operation should almost always be paired with an UNLOCK operation.
+
+
+ (6) UNLOCK operations.
+
+ This also acts as a one-way permeable barrier. It guarantees that all
+ memory operations before the UNLOCK operation will appear to happen before
+ the UNLOCK operation with respect to the other components of the system.
+
+ Memory operations that occur after an UNLOCK operation may appear to
+ happen before it completes.
+
+ LOCK and UNLOCK operations are guaranteed to appear with respect to each
+ other strictly in the order specified.
+
+ The use of LOCK and UNLOCK operations generally precludes the need for
+ other sorts of memory barrier (but note the exceptions mentioned in the
+ subsection "MMIO write barrier").
+
+
+Memory barriers are only required where there's a possibility of interaction
+between two CPUs or between a CPU and a device. If it can be guaranteed that
+there won't be any such interaction in any particular piece of code, then
+memory barriers are unnecessary in that piece of code.
+
+
+Note that these are the _minimum_ guarantees. Different architectures may give
+more substantial guarantees, but they may _not_ be relied upon outside of arch
+specific code.
+
+
+WHAT MAY NOT BE ASSUMED ABOUT MEMORY BARRIERS?
+----------------------------------------------
+
+There are certain things that the Linux kernel memory barriers do not guarantee:
+
+ (*) There is no guarantee that any of the memory accesses specified before a
+ memory barrier will be _complete_ by the completion of a memory barrier
+ instruction; the barrier can be considered to draw a line in that CPU's
+ access queue that accesses of the appropriate type may not cross.
+
+ (*) There is no guarantee that issuing a memory barrier on one CPU will have
+ any direct effect on another CPU or any other hardware in the system. The
+ indirect effect will be the order in which the second CPU sees the effects
+ of the first CPU's accesses occur, but see the next point:
+
+ (*) There is no guarantee that a CPU will see the correct order of effects
+ from a second CPU's accesses, even _if_ the second CPU uses a memory
+ barrier, unless the first CPU _also_ uses a matching memory barrier (see
+ the subsection on "SMP Barrier Pairing").
+
+ (*) There is no guarantee that some intervening piece of off-the-CPU
+ hardware[*] will not reorder the memory accesses. CPU cache coherency
+ mechanisms should propagate the indirect effects of a memory barrier
+ between CPUs, but might not do so in order.
+
+ [*] For information on bus mastering DMA and coherency please read:
+
+ Documentation/pci.txt
+ Documentation/DMA-mapping.txt
+ Documentation/DMA-API.txt
+
+
+DATA DEPENDENCY BARRIERS
+------------------------
+
+The usage requirements of data dependency barriers are a little subtle, and
+it's not always obvious that they're needed. To illustrate, consider the
+following sequence of events:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { A == 1, B == 2, C = 3, P == &A, Q == &C }
+ B = 4;
+	<write barrier>
+ P = &B
+ Q = P;
+ D = *Q;
+
+There's a clear data dependency here, and it would seem that by the end of the
+sequence, Q must be either &A or &B, and that:
+
+ (Q == &A) implies (D == 1)
+ (Q == &B) implies (D == 4)
+
+But! CPU 2's perception of P may be updated _before_ its perception of B, thus
+leading to the following situation:
+
+ (Q == &B) and (D == 2) ????
+
+Whilst this may seem like a failure of coherency or causality maintenance, it
+isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
+Alpha).
+
+To deal with this, a data dependency barrier must be inserted between the
+address load and the data load:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { A == 1, B == 2, C = 3, P == &A, Q == &C }
+ B = 4;
+	<write barrier>
+ P = &B
+ Q = P;
+			<data dependency barrier>
+ D = *Q;
+
+This enforces the occurrence of one of the two implications, and prevents the
+third possibility from arising.
+
+[!] Note that this extremely counterintuitive situation arises most easily on
+machines with split caches, so that, for example, one cache bank processes
+even-numbered cache lines and the other bank processes odd-numbered cache
+lines. The pointer P might be stored in an odd-numbered cache line, and the
+variable B might be stored in an even-numbered cache line. Then, if the
+even-numbered bank of the reading CPU's cache is extremely busy while the
+odd-numbered bank is idle, one can see the new value of the pointer P (&B),
+but the old value of the variable B (2).
+
+
+Another example of where data dependency barriers might be required is where a
+number is read from memory and then used to calculate the index for an array
+access:
+
+ CPU 1 CPU 2
+ =============== ===============
+ { M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 }
+ M[1] = 4;
+	<write barrier>
+ P = 1
+ Q = P;
+			<data dependency barrier>
+ D = M[Q];
+
+
+The data dependency barrier is very important to the RCU system, for example.
+See rcu_dereference() in include/linux/rcupdate.h. This permits the current
+target of an RCU'd pointer to be replaced with a new modified target, without
+the replacement target appearing to be incompletely initialised.
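+
+As a purely illustrative sketch of the read side (gp, struct foo and
+do_something() are invented names here), the data dependency barrier is
+normally obtained through rcu_dereference() rather than open-coded:
+
+	struct foo *p;
+
+	rcu_read_lock();
+	p = rcu_dereference(gp);	/* includes the data dependency barrier */
+	if (p)
+		do_something(p->a);
+	rcu_read_unlock();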
+
+See also the subsection on "Cache Coherency" for a more thorough example.
+
+
+CONTROL DEPENDENCIES
+--------------------
+
+A control dependency requires a full read memory barrier, not simply a data
+dependency barrier to make it work correctly. Consider the following bit of
+code:
+
+ q = &a;
+ if (p)
+ q = &b;
+	<data dependency barrier>
+ x = *q;
+
+This will not have the desired effect because there is no actual data
+dependency, but rather a control dependency that the CPU may short-circuit by
+attempting to predict the outcome in advance. In such a case what's actually
+required is:
+
+ q = &a;
+ if (p)
+ q = &b;
+	<read barrier>
+ x = *q;
+
+
+SMP BARRIER PAIRING
+-------------------
+
+When dealing with CPU-CPU interactions, certain types of memory barrier should
+always be paired. A lack of appropriate pairing is almost certainly an error.
+
+A write barrier should always be paired with a data dependency barrier or read
+barrier, though a general barrier would also be viable. Similarly a read
+barrier or a data dependency barrier should always be paired with at least a
+write barrier, though, again, a general barrier is viable:
+
+ CPU 1 CPU 2
+ =============== ===============
+ a = 1;
+	<write barrier>
+ b = 2; x = a;
+			<read barrier>
+ y = b;
+
+Or:
+
+ CPU 1 CPU 2
+ =============== ===============================
+ a = 1;
+	<write barrier>
+ b = &a; x = b;
+			<data dependency barrier>
+ y = *x;
+
+Basically, the read barrier always has to be there, even though it can be of
+the "weaker" type.
+
+
+EXAMPLES OF MEMORY BARRIER SEQUENCES
+------------------------------------
+
+Firstly, write barriers act as partial orderings on store operations.
+Consider the following sequence of events:
+
+ CPU 1
+ =======================
+ STORE A = 1
+ STORE B = 2
+ STORE C = 3
+	<write barrier>
+ STORE D = 4
+ STORE E = 5
+
+This sequence of events is committed to the memory coherence system in an order
+that the rest of the system might perceive as the unordered set of { STORE A,
+STORE B, STORE C } all occurring before the unordered set of { STORE D, STORE E
+}:
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | } /\
+ | | : +------+ }----- \ -----> Events perceptible
+ | | : | A=1 | } \/ to rest of system
+ | | : +------+ }
+ | CPU 1 | : | B=2 | }
+ | | +------+ }
+ | | wwwwwwwwwwwwwwww } <--- At this point the write barrier
+ | | +------+ } requires all stores prior to the
+ | | : | E=5 | } barrier to be committed before
+ | | : +------+ } further stores may be take place.
+ | |------>| D=4 | }
+ | | +------+
+ +-------+ : :
+ |
+ | Sequence in which stores committed to memory system
+ | by CPU 1
+ V
+
+
+Secondly, data dependency barriers act as partial orderings on data-dependent
+loads. Consider the following sequence of events:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ STORE A = 1
+ STORE B = 2
+	<write barrier>
+ STORE C = &B LOAD X
+ STORE D = 4 LOAD C (gets &B)
+ LOAD *C (reads B)
+
+Without intervention, CPU 2 may perceive the events on CPU 1 in some
+effectively random order, despite the write barrier issued by CPU 1:
+
+ +-------+ : : : :
+ | | +------+ +-------+ | Sequence of update
+ | |------>| B=2 |----- --->| Y->8 | | of perception on
+ | | : +------+ \ +-------+ | CPU 2
+ | CPU 1 | : | A=1 | \ --->| C->&Y | V
+ | | +------+ | +-------+
+ | | wwwwwwwwwwwwwwww | : :
+ | | +------+ | : :
+ | | : | C=&B |--- | : : +-------+
+ | | : +------+ \ | +-------+ | |
+ | |------>| D=4 | ----------->| C->&B |------>| |
+ | | +------+ | +-------+ | |
+ +-------+ : : | : : | |
+ | : : | |
+ | : : | CPU 2 |
+ | +-------+ | |
+ Apparently incorrect ---> | | B->7 |------>| |
+ perception of B (!) | +-------+ | |
+ | : : | |
+ | +-------+ | |
+ The load of X holds ---> \ | X->9 |------>| |
+ up the maintenance \ +-------+ | |
+ of coherence of B ----->| B->2 | +-------+
+ +-------+
+ : :
+
+
+In the above example, CPU 2 perceives that B is 7, despite the load of *C
+(which would be B) coming after the LOAD of C.
+
+If, however, a data dependency barrier were to be placed between the load of C
+and the load of *C (ie: B) on CPU 2, then the following will occur:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| B=2 |----- --->| Y->8 |
+ | | : +------+ \ +-------+
+ | CPU 1 | : | A=1 | \ --->| C->&Y |
+ | | +------+ | +-------+
+ | | wwwwwwwwwwwwwwww | : :
+ | | +------+ | : :
+ | | : | C=&B |--- | : : +-------+
+ | | : +------+ \ | +-------+ | |
+ | |------>| D=4 | ----------->| C->&B |------>| |
+ | | +------+ | +-------+ | |
+ +-------+ : : | : : | |
+ | : : | |
+ | : : | CPU 2 |
+ | +-------+ | |
+ \ | X->9 |------>| |
+ \ +-------+ | |
+ ----->| B->2 | | |
+ +-------+ | |
+ Makes sure all effects ---> ddddddddddddddddd | |
+ prior to the store of C +-------+ | |
+ are perceptible to | B->2 |------>| |
+ successive loads +-------+ | |
+ : : +-------+
+
+
+And thirdly, a read barrier acts as a partial order on loads. Consider the
+following sequence of events:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ STORE A=1
+ STORE B=2
+ STORE C=3
+	<write barrier>
+ STORE D=4
+ STORE E=5
+ LOAD A
+ LOAD B
+ LOAD C
+ LOAD D
+ LOAD E
+
+Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
+some effectively random order, despite the write barrier issued by CPU 1:
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | }
+ | | : +------+ }
+ | | : | A=1 | }
+ | | : +------+ }
+ | CPU 1 | : | B=2 | }---
+ | | +------+ } \
+ | | wwwwwwwwwwwww} \
+ | | +------+ } \ : : +-------+
+ | | : | E=5 | } \ +-------+ | |
+ | | : +------+ } \ { | C->3 |------>| |
+ | |------>| D=4 | } \ { +-------+ : | |
+ | | +------+ \ { | E->5 | : | |
+ +-------+ : : \ { +-------+ : | |
+ Transfer -->{ | A->1 | : | CPU 2 |
+ from CPU 1 { +-------+ : | |
+ to CPU 2 { | D->4 | : | |
+ { +-------+ : | |
+ { | B->2 |------>| |
+ +-------+ | |
+ : : +-------+
+
+
+If, however, a read barrier were to be placed between the load of C and the
+load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
+perceived correctly by CPU 2.
+
+ +-------+ : :
+ | | +------+
+ | |------>| C=3 | }
+ | | : +------+ }
+ | | : | A=1 | }---
+ | | : +------+ } \
+ | CPU 1 | : | B=2 | } \
+ | | +------+ \
+ | | wwwwwwwwwwwwwwww \
+ | | +------+ \ : : +-------+
+ | | : | E=5 | } \ +-------+ | |
+ | | : +------+ }--- \ { | C->3 |------>| |
+ | |------>| D=4 | } \ \ { +-------+ : | |
+ | | +------+ \ -->{ | B->2 | : | |
+ +-------+ : : \ { +-------+ : | |
+ \ { | A->1 | : | CPU 2 |
+ \ +-------+ | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of C \ { | E->5 | : | |
+ to be perceptible to CPU 2 -->{ +-------+ : | |
+ { | D->4 |------>| |
+ +-------+ | |
+ : : +-------+
+
+
+========================
+EXPLICIT KERNEL BARRIERS
+========================
+
+The Linux kernel has a variety of different barriers that act at different
+levels:
+
+ (*) Compiler barrier.
+
+ (*) CPU memory barriers.
+
+ (*) MMIO write barrier.
+
+
+COMPILER BARRIER
+----------------
+
+The Linux kernel has an explicit compiler barrier function that prevents the
+compiler from moving the memory accesses either side of it to the other side:
+
+ barrier();
+
+This is a general barrier - lesser varieties of compiler barrier do not exist.
+
+The compiler barrier has no direct effect on the CPU, which may then reorder
+things however it wishes.
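+
+As a deliberately simplistic sketch (flag standing in for some variable that
+is modified by an interrupt handler or by another CPU), barrier() can be used
+to stop the compiler caching a value in a register across a busy-wait loop:
+
+	while (!flag)
+		barrier();	/* the compiler must reload flag each time */
+
+Note that this constrains only the compiler; if the loop also has to be
+ordered against other memory accesses as the CPU sees them, one of the CPU
+memory barriers described below is still required.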
+
+
+CPU MEMORY BARRIERS
+-------------------
+
+The Linux kernel has eight basic CPU memory barriers:
+
+ TYPE MANDATORY SMP CONDITIONAL
+ =============== ======================= ===========================
+ GENERAL mb() smp_mb()
+ WRITE wmb() smp_wmb()
+ READ rmb() smp_rmb()
+ DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
+
+
+All CPU memory barriers unconditionally imply compiler barriers.
+
+SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
+systems because it is assumed that a CPU will appear to be self-consistent,
+and will order overlapping accesses correctly with respect to itself.
+
+[!] Note that SMP memory barriers _must_ be used to control the ordering of
+references to shared memory on SMP systems, though the use of locking instead
+is sufficient.
+
+Mandatory barriers should not be used to control SMP effects, since mandatory
+barriers unnecessarily impose overhead on UP systems. They may, however, be
+used to control MMIO effects on accesses through relaxed memory I/O windows.
+These are required even on non-SMP systems as they affect the order in which
+memory operations appear to a device by prohibiting both the compiler and the
+CPU from reordering them.
+
+
+There are some more advanced barrier functions:
+
+ (*) set_mb(var, value)
+ (*) set_wmb(var, value)
+
+     These assign the value to the variable and then insert at least a write
+     barrier after it, depending on the function. They aren't guaranteed to
+     insert anything more than a compiler barrier in a UP compilation. (A
+     rough expansion is sketched just after this list of functions.)
+
+
+ (*) smp_mb__before_atomic_dec();
+ (*) smp_mb__after_atomic_dec();
+ (*) smp_mb__before_atomic_inc();
+ (*) smp_mb__after_atomic_inc();
+
+ These are for use with atomic add, subtract, increment and decrement
+ functions, especially when used for reference counting. These functions
+ do not imply memory barriers.
+
+ As an example, consider a piece of code that marks an object as being dead
+ and then decrements the object's reference count:
+
+ obj->dead = 1;
+ smp_mb__before_atomic_dec();
+ atomic_dec(&obj->ref_count);
+
+ This makes sure that the death mark on the object is perceived to be set
+ *before* the reference counter is decremented.
+
+ See Documentation/atomic_ops.txt for more information. See the "Atomic
+ operations" subsection for information on where to use these.
+
+
+ (*) smp_mb__before_clear_bit(void);
+ (*) smp_mb__after_clear_bit(void);
+
+ These are for use similar to the atomic inc/dec barriers. These are
+ typically used for bitwise unlocking operations, so care must be taken as
+ there are no implicit memory barriers here either.
+
+ Consider implementing an unlock operation of some nature by clearing a
+ locking bit. The clear_bit() would then need to be barriered like this:
+
+ smp_mb__before_clear_bit();
+ clear_bit( ... );
+
+ This prevents memory operations before the clear leaking to after it. See
+ the subsection on "Locking Functions" with reference to UNLOCK operation
+ implications.
+
+ See Documentation/atomic_ops.txt for more information. See the "Atomic
+ operations" subsection for information on where to use these.
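+
+
+As a rough guide to the set_mb() and set_wmb() functions mentioned above (the
+real definitions are per-architecture, and some implementations use an atomic
+exchange rather than a plain store), they behave approximately as if they
+were:
+
+	#define set_mb(var, value)	do { var = value; mb(); } while (0)
+	#define set_wmb(var, value)	do { var = value; wmb(); } while (0)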
+
+
+MMIO WRITE BARRIER
+------------------
+
+The Linux kernel also has a special barrier for use with memory-mapped I/O
+writes:
+
+ mmiowb();
+
+This is a variation on the mandatory write barrier that causes writes to weakly
+ordered I/O regions to be partially ordered. Its effects may go beyond the
+CPU->Hardware interface and actually affect the hardware at some level.
+
+See the subsection "Locks vs I/O accesses" for more information.
+
+
+===============================
+IMPLICIT KERNEL MEMORY BARRIERS
+===============================
+
+Some of the other functions in the linux kernel imply memory barriers, amongst
+which are locking, scheduling and memory allocation functions.
+
+This specification is a _minimum_ guarantee; any particular architecture may
+provide more substantial guarantees, but these may not be relied upon outside
+of arch specific code.
+
+
+LOCKING FUNCTIONS
+-----------------
+
+The Linux kernel has a number of locking constructs:
+
+ (*) spin locks
+ (*) R/W spin locks
+ (*) mutexes
+ (*) semaphores
+ (*) R/W semaphores
+ (*) RCU
+
+In all cases there are variants on "LOCK" operations and "UNLOCK" operations
+for each construct. These operations all imply certain barriers:
+
+ (1) LOCK operation implication:
+
+ Memory operations issued after the LOCK will be completed after the LOCK
+ operation has completed.
+
+ Memory operations issued before the LOCK may be completed after the LOCK
+ operation has completed.
+
+ (2) UNLOCK operation implication:
+
+ Memory operations issued before the UNLOCK will be completed before the
+ UNLOCK operation has completed.
+
+ Memory operations issued after the UNLOCK may be completed before the
+ UNLOCK operation has completed.
+
+ (3) LOCK vs LOCK implication:
+
+ All LOCK operations issued before another LOCK operation will be completed
+ before that LOCK operation.
+
+ (4) LOCK vs UNLOCK implication:
+
+ All LOCK operations issued before an UNLOCK operation will be completed
+ before the UNLOCK operation.
+
+ All UNLOCK operations issued before a LOCK operation will be completed
+ before the LOCK operation.
+
+ (5) Failed conditional LOCK implication:
+
+ Certain variants of the LOCK operation may fail, either due to being
+ unable to get the lock immediately, or due to receiving an unblocked
+ signal whilst asleep waiting for the lock to become available. Failed
+ locks do not imply any sort of barrier.
+
+Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
+equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
+
+[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
+    barriers is that the effects of instructions outside of a critical section
+    may seep into the inside of the critical section.
+
+Locks and semaphores may not provide any guarantee of ordering on UP compiled
+systems, and so cannot be counted on in such a situation to actually achieve
+anything at all - especially with respect to I/O accesses - unless combined
+with interrupt disabling operations.
+
+See also the section on "Inter-CPU locking barrier effects".
+
+
+As an example, consider the following:
+
+ *A = a;
+ *B = b;
+ LOCK
+ *C = c;
+ *D = d;
+ UNLOCK
+ *E = e;
+ *F = f;
+
+The following sequence of events is acceptable:
+
+ LOCK, {*F,*A}, *E, {*C,*D}, *B, UNLOCK
+
+ [+] Note that {*F,*A} indicates a combined access.
+
+But none of the following are:
+
+ {*F,*A}, *B, LOCK, *C, *D, UNLOCK, *E
+ *A, *B, *C, LOCK, *D, UNLOCK, *E, *F
+ *A, *B, LOCK, *C, UNLOCK, *D, *E, *F
+ *B, LOCK, *C, *D, UNLOCK, {*F,*A}, *E
+
+
+
+INTERRUPT DISABLING FUNCTIONS
+-----------------------------
+
+Functions that disable interrupts (LOCK equivalent) and enable interrupts
+(UNLOCK equivalent) will act as compiler barriers only. So if memory or I/O
+barriers are required in such a situation, they must be provided from some
+other means.
+
+
+MISCELLANEOUS FUNCTIONS
+-----------------------
+
+Other functions that imply barriers:
+
+ (*) schedule() and similar imply full memory barriers.
+
+ (*) Memory allocation and release functions imply full memory barriers.
+
+
+=================================
+INTER-CPU LOCKING BARRIER EFFECTS
+=================================
+
+On SMP systems locking primitives give a more substantial form of barrier: one
+that does affect memory access ordering on other CPUs, within the context of
+conflict on any particular lock.
+
+
+LOCKS VS MEMORY ACCESSES
+------------------------
+
+Consider the following: the system has a pair of spinlocks (M) and (Q), and
+three CPUs; then should the following sequence of events occur:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ *A = a; *E = e;
+ LOCK M LOCK Q
+ *B = b; *F = f;
+ *C = c; *G = g;
+ UNLOCK M UNLOCK Q
+ *D = d; *H = h;
+
+Then there is no guarantee as to what order CPU #3 will see the accesses to *A
+through *H occur in, other than the constraints imposed by the separate locks
+on the separate CPUs. It might, for example, see:
+
+ *E, LOCK M, LOCK Q, *G, *C, *F, *A, *B, UNLOCK Q, *D, *H, UNLOCK M
+
+But it won't see any of:
+
+ *B, *C or *D preceding LOCK M
+ *A, *B or *C following UNLOCK M
+ *F, *G or *H preceding LOCK Q
+ *E, *F or *G following UNLOCK Q
+
+
+However, if the following occurs:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ *A = a;
+ LOCK M [1]
+ *B = b;
+ *C = c;
+ UNLOCK M [1]
+ *D = d; *E = e;
+ LOCK M [2]
+ *F = f;
+ *G = g;
+ UNLOCK M [2]
+ *H = h;
+
+CPU #3 might see:
+
+ *E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
+ LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
+
+But assuming CPU #1 gets the lock first, it won't see any of:
+
+ *B, *C, *D, *F, *G or *H preceding LOCK M [1]
+ *A, *B or *C following UNLOCK M [1]
+ *F, *G or *H preceding LOCK M [2]
+ *A, *B, *C, *E, *F or *G following UNLOCK M [2]
+
+
+LOCKS VS I/O ACCESSES
+---------------------
+
+Under certain circumstances (especially involving NUMA), I/O accesses within
+two spinlocked sections on two different CPUs may be seen as interleaved by the
+PCI bridge, because the PCI bridge does not necessarily participate in the
+cache-coherence protocol, and is therefore incapable of issuing the required
+read memory barriers.
+
+For example:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ spin_lock(Q)
+ writel(0, ADDR)
+ writel(1, DATA);
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ writel(5, DATA);
+ spin_unlock(Q);
+
+may be seen by the PCI bridge as follows:
+
+ STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
+
+which would probably cause the hardware to malfunction.
+
+
+What is necessary here is to intervene with an mmiowb() before dropping the
+spinlock, for example:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ spin_lock(Q)
+ writel(0, ADDR)
+ writel(1, DATA);
+ mmiowb();
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ writel(5, DATA);
+ mmiowb();
+ spin_unlock(Q);
+
+this will ensure that the two stores issued on CPU #1 appear at the PCI bridge
+before either of the stores issued on CPU #2.
+
+
+Furthermore, following a store by a load to the same device obviates the need
+for an mmiowb(), because the load forces the store to complete before the load
+is performed:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ spin_lock(Q)
+ writel(0, ADDR)
+ a = readl(DATA);
+ spin_unlock(Q);
+ spin_lock(Q);
+ writel(4, ADDR);
+ b = readl(DATA);
+ spin_unlock(Q);
+
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+=================================
+WHERE ARE MEMORY BARRIERS NEEDED?
+=================================
+
+Under normal operation, memory operation reordering is generally not going to
+be a problem as a single-threaded linear piece of code will still appear to
+work correctly, even if it's in an SMP kernel.  There are, however, four
+circumstances in which reordering definitely _could_ be a problem:
+
+ (*) Interprocessor interaction.
+
+ (*) Atomic operations.
+
+ (*) Accessing devices (I/O).
+
+ (*) Interrupts.
+
+
+INTERPROCESSOR INTERACTION
+--------------------------
+
+When there's a system with more than one processor, more than one CPU in the
+system may be working on the same data set at the same time. This can cause
+synchronisation problems, and the usual way of dealing with them is to use
+locks. Locks, however, are quite expensive, and so it may be preferable to
+operate without the use of a lock if at all possible. In such a case
+operations that affect both CPUs may have to be carefully ordered to prevent
+a malfunction.
+
+Consider, for example, the R/W semaphore slow path. Here a waiting process is
+queued on the semaphore, by virtue of it having a piece of its stack linked to
+the semaphore's list of waiting processes:
+
+ struct rw_semaphore {
+ ...
+ spinlock_t lock;
+ struct list_head waiters;
+ };
+
+ struct rwsem_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ };
+
+To wake up a particular waiter, the up_read() or up_write() functions have to:
+
+ (1) read the next pointer from this waiter's record to know as to where the
+ next waiter record is;
+
+ (2) read the pointer to the waiter's task structure;
+
+ (3) clear the task pointer to tell the waiter it has been given the semaphore;
+
+ (4) call wake_up_process() on the task; and
+
+ (5) release the reference held on the waiter's task struct.
+
+In other words, it has to perform this sequence of events:
+
+ LOAD waiter->list.next;
+ LOAD waiter->task;
+ STORE waiter->task;
+ CALL wakeup
+ RELEASE task
+
+and if any of these steps occur out of order, then the whole thing may
+malfunction.
+
+Once it has queued itself and dropped the semaphore lock, the waiter does not
+get the lock again; it instead just waits for its task pointer to be cleared
+before proceeding. Since the record is on the waiter's stack, this means that
+if the task pointer is cleared _before_ the next pointer in the list is read,
+another CPU might start processing the waiter and might clobber the waiter's
+stack before the up*() function has a chance to read the next pointer.
+
+Consider then what might happen to the above sequence of events:
+
+ CPU 1 CPU 2
+ =============================== ===============================
+ down_xxx()
+ Queue waiter
+ Sleep
+ up_yyy()
+ LOAD waiter->task;
+ STORE waiter->task;
+ Woken up by other event
+	<preempt>
+ Resume processing
+ down_xxx() returns
+ call foo()
+ foo() clobbers *waiter
+	</preempt>
+ LOAD waiter->list.next;
+ --- OOPS ---
+
+This could be dealt with using the semaphore lock, but then the down_xxx()
+function has to needlessly get the spinlock again after being woken up.
+
+The way to deal with this is to insert a general SMP memory barrier:
+
+ LOAD waiter->list.next;
+ LOAD waiter->task;
+ smp_mb();
+ STORE waiter->task;
+ CALL wakeup
+ RELEASE task
+
+In this case, the barrier makes a guarantee that all memory accesses before the
+barrier will appear to happen before all the memory accesses after the barrier
+with respect to the other CPUs on the system. It does _not_ guarantee that all
+the memory accesses before the barrier will be complete by the time the barrier
+instruction itself is complete.
+
+On a UP system - where this wouldn't be a problem - the smp_mb() is just a
+compiler barrier, thus making sure the compiler emits the instructions in the
+right order without actually intervening in the CPU.  Since there's only
+one CPU, that CPU's dependency ordering logic will take care of everything
+else.
+
+
+ATOMIC OPERATIONS
+-----------------
+
+Though they are technically interprocessor interaction considerations, atomic
+operations are noted specially as they do _not_ generally imply memory
+barriers. The possible offenders include:
+
+ xchg();
+ cmpxchg();
+ test_and_set_bit();
+ test_and_clear_bit();
+ test_and_change_bit();
+ atomic_cmpxchg();
+ atomic_inc_return();
+ atomic_dec_return();
+ atomic_add_return();
+ atomic_sub_return();
+ atomic_inc_and_test();
+ atomic_dec_and_test();
+ atomic_sub_and_test();
+ atomic_add_negative();
+ atomic_add_unless();
+
+These may be used for such things as implementing LOCK operations or controlling
+the lifetime of objects by decreasing their reference counts. In such cases
+they need preceding memory barriers.
+
+The following may also be possible offenders as they may be used as UNLOCK
+operations.
+
+ set_bit();
+ clear_bit();
+ change_bit();
+ atomic_set();
+
+
+The following are a little tricky:
+
+ atomic_add();
+ atomic_sub();
+ atomic_inc();
+ atomic_dec();
+
+If they're used for statistics generation, then they probably don't need memory
+barriers, unless there's a coupling between statistical data.
+
+If they're used for reference counting on an object to control its lifetime,
+they probably don't need memory barriers because either the reference count
+will be adjusted inside a locked section, or the caller will already hold
+sufficient references to make the lock, and thus a memory barrier, unnecessary.
+
+If they're used for constructing a lock of some description, then they probably
+do need memory barriers as a lock primitive generally has to do things in a
+specific order.
+
+
+Basically, each usage case has to be carefully considered as to whether memory
+barriers are needed or not. The simplest rule is probably: if the atomic
+operation is protected by a lock, then it does not require a barrier unless
+there's another operation within the critical section with respect to which an
+ordering must be maintained.
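+
+As an entirely illustrative sketch of that rule (the names here are invented),
+an atomic operation that only ever takes place inside a locked section needs
+no additional barrier:
+
+	spin_lock(&foo_lock);
+	atomic_inc(&foo->usage);	/* ordered by the lock itself */
+	list_add(&foo->list, &foo_list);
+	spin_unlock(&foo_lock);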
+
+See Documentation/atomic_ops.txt for more information.
+
+
+ACCESSING DEVICES
+-----------------
+
+Many devices can be memory mapped, and so appear to the CPU as if they're just
+a set of memory locations. To control such a device, the driver usually has to
+make the right memory accesses in exactly the right order.
+
+However, having a clever CPU or a clever compiler creates a potential problem
+in that the carefully sequenced accesses in the driver code won't reach the
+device in the requisite order if the CPU or the compiler thinks it is more
+efficient to reorder, combine or merge accesses - something that would cause
+the device to malfunction.
+
+Inside of the Linux kernel, I/O should be done through the appropriate accessor
+routines - such as inb() or writel() - which know how to make such accesses
+appropriately sequential. Whilst this, for the most part, renders the explicit
+use of memory barriers unnecessary, there are a couple of situations where they
+might be needed:
+
+ (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
+ so for _all_ general drivers locks should be used and mmiowb() must be
+ issued prior to unlocking the critical section.
+
+ (2) If the accessor functions are used to refer to an I/O memory window with
+ relaxed memory access properties, then _mandatory_ memory barriers are
+ required to enforce ordering.
+
+See Documentation/DocBook/deviceiobook.tmpl for more information.
+
+
+INTERRUPTS
+----------
+
+A driver may be interrupted by its own interrupt service routine, and thus the
+two parts of the driver may interfere with each other's attempts to control or
+access the device.
+
+This may be alleviated - at least in part - by disabling local interrupts (a
+form of locking), such that the critical operations are all contained within
+the interrupt-disabled section in the driver. Whilst the driver's interrupt
+routine is executing, the driver's core may not run on the same CPU, and its
+interrupt is not permitted to happen again until the current interrupt has been
+handled, thus the interrupt handler does not need to lock against that.
+
+However, consider a driver that was talking to an ethernet card that sports an
+address register and a data register. If that driver's core talks to the card
+under interrupt-disablement and then the driver's interrupt handler is invoked:
+
+ LOCAL IRQ DISABLE
+ writew(ADDR, 3);
+ writew(DATA, y);
+ LOCAL IRQ ENABLE
+	<interrupt>
+ writew(ADDR, 4);
+ q = readw(DATA);
+	</interrupt>
+
+The store to the data register might happen after the second store to the
+address register if ordering rules are sufficiently relaxed:
+
+ STORE *ADDR = 3, STORE *ADDR = 4, STORE *DATA = y, q = LOAD *DATA
+
+
+If ordering rules are relaxed, it must be assumed that accesses done inside an
+interrupt disabled section may leak outside of it and may interleave with
+accesses performed in an interrupt - and vice versa - unless implicit or
+explicit barriers are used.
+
+Normally this won't be a problem because the I/O accesses done inside such
+sections will include synchronous load operations on strictly ordered I/O
+registers that form implicit I/O barriers. If this isn't sufficient then an
+mmiowb() may need to be used explicitly.
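+
+For instance, sticking with the invented example above (STATUS standing in
+for a strictly ordered register on the same card), the interrupt-disabled
+section might flush its stores with a synchronous read before re-enabling
+interrupts:
+
+	LOCAL IRQ DISABLE
+	writew(ADDR, 3);
+	writew(DATA, y);
+	z = readw(STATUS);	<--- implicit I/O barrier
+	LOCAL IRQ ENABLE
+
+The read forces both stores to reach the device before interrupts are
+re-enabled, so the interrupt handler's accesses to the card cannot become
+interleaved with them.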
+
+
+A similar situation may occur between an interrupt routine and two routines
+running on separate CPUs that communicate with each other. If such a case is
+likely, then interrupt-disabling locks should be used to guarantee ordering.
+
+
+==========================
+KERNEL I/O BARRIER EFFECTS
+==========================
+
+When accessing I/O memory, drivers should use the appropriate accessor
+functions:
+
+ (*) inX(), outX():
+
+ These are intended to talk to I/O space rather than memory space, but
+ that's primarily a CPU-specific concept. The i386 and x86_64 processors do
+ indeed have special I/O space access cycles and instructions, but many
+ CPUs don't have such a concept.
+
+ The PCI bus, amongst others, defines an I/O space concept - which on such
+     CPUs as i386 and x86_64 readily maps to the CPU's concept of I/O
+     space.  However, it may also be mapped as a virtual I/O space in the CPU's
+ memory map, particularly on those CPUs that don't support alternate
+ I/O spaces.
+
+ Accesses to this space may be fully synchronous (as on i386), but
+ intermediary bridges (such as the PCI host bridge) may not fully honour
+ that.
+
+ They are guaranteed to be fully ordered with respect to each other.
+
+ They are not guaranteed to be fully ordered with respect to other types of
+ memory and I/O operation.
+
+ (*) readX(), writeX():
+
+ Whether these are guaranteed to be fully ordered and uncombined with
+ respect to each other on the issuing CPU depends on the characteristics
+ defined for the memory window through which they're accessing. On later
+ i386 architecture machines, for example, this is controlled by way of the
+ MTRR registers.
+
+     Ordinarily, these will be guaranteed to be fully ordered and uncombined,
+ provided they're not accessing a prefetchable device.
+
+ However, intermediary hardware (such as a PCI bridge) may indulge in
+ deferral if it so wishes; to flush a store, a load from the same location
+ is preferred[*], but a load from the same device or from configuration
+ space should suffice for PCI.
+
+ [*] NOTE! attempting to load from the same location as was written to may
+ cause a malfunction - consider the 16550 Rx/Tx serial registers for
+ example.
+
+ Used with prefetchable I/O memory, an mmiowb() barrier may be required to
+ force stores to be ordered.
+
+ Please refer to the PCI specification for more information on interactions
+ between PCI transactions.
+
+ (*) readX_relaxed()
+
+ These are similar to readX(), but are not guaranteed to be ordered in any
+ way. Be aware that there is no I/O read barrier available.
+
+ (*) ioreadX(), iowriteX()
+
+ These will perform as appropriate for the type of access they're actually
+ doing, be it inX()/outX() or readX()/writeX().
+
+
+========================================
+ASSUMED MINIMUM EXECUTION ORDERING MODEL
+========================================
+
+It has to be assumed that the conceptual CPU is weakly-ordered but that it will
+maintain the appearance of program causality with respect to itself. Some CPUs
+(such as i386 or x86_64) are more constrained than others (such as powerpc or
+frv), and so the most relaxed case (namely DEC Alpha) must be assumed outside
+of arch-specific code.
+
+This means that it must be considered that the CPU will execute its instruction
+stream in any order it feels like - or even in parallel - provided that if an
+instruction in the stream depends on an earlier instruction, then that
+earlier instruction must be sufficiently complete[*] before the later
+instruction may proceed; in other words: provided that the appearance of
+causality is maintained.
+
+ [*] Some instructions have more than one effect - such as changing the
+ condition codes, changing registers or changing memory - and different
+ instructions may depend on different effects.
+
+A CPU may also discard any instruction sequence that winds up having no
+ultimate effect. For example, if two adjacent instructions both load an
+immediate value into the same register, the first may be discarded.
+
+
+Similarly, it has to be assumed that the compiler might reorder the instruction
+stream in any way it sees fit, again provided the appearance of causality is
+maintained.
+
+
+============================
+THE EFFECTS OF THE CPU CACHE
+============================
+
+The way cached memory operations are perceived across the system is affected to
+a certain extent by the caches that lie between CPUs and memory, and by the
+memory coherence system that maintains the consistency of state in the system.
+
+As far as the way a CPU interacts with another part of the system through the
+caches goes, the memory system has to include the CPU's caches, and memory
+barriers for the most part act at the interface between the CPU and its cache
+(memory barriers logically act on the dotted line in the following diagram):
+
+ <--- CPU ---> : <----------- Memory ----------->
+ :
+ +--------+ +--------+ : +--------+ +-----------+
+ | | | | : | | | | +--------+
+ | CPU | | Memory | : | CPU | | | | |
+ | Core |--->| Access |----->| Cache |<-->| | | |
+ | | | Queue | : | | | |--->| Memory |
+ | | | | : | | | | | |
+ +--------+ +--------+ : +--------+ | | | |
+ : | Cache | +--------+
+ : | Coherency |
+ : | Mechanism | +--------+
+ +--------+ +--------+ : +--------+ | | | |
+ | | | | : | | | | | |
+ | CPU | | Memory | : | CPU | | |--->| Device |
+ | Core |--->| Access |----->| Cache |<-->| | | |
+ | | | Queue | : | | | | | |
+ | | | | : | | | | +--------+
+ +--------+ +--------+ : +--------+ +-----------+
+ :
+ :
+
+Although any particular load or store may not actually appear outside of the
+CPU that issued it since it may have been satisfied within the CPU's own cache,
+it will still appear as if the full memory access had taken place as far as the
+other CPUs are concerned since the cache coherency mechanisms will migrate the
+cacheline over to the accessing CPU and propagate the effects upon conflict.
+
+The CPU core may execute instructions in any order it deems fit, provided the
+expected program causality appears to be maintained. Some of the instructions
+generate load and store operations which then go into the queue of memory
+accesses to be performed. The core may place these in the queue in any order
+it wishes, and continue execution until it is forced to wait for an instruction
+to complete.
+
+What memory barriers are concerned with is controlling the order in which
+accesses cross from the CPU side of things to the memory side of things, and
+the order in which the effects are perceived to happen by the other observers
+in the system.
+
+[!] Memory barriers are _not_ needed within a given CPU, as CPUs always see
+their own loads and stores as if they had happened in program order.
+
+[!] MMIO or other device accesses may bypass the cache system. This depends on
+the properties of the memory window through which devices are accessed and/or
+the use of any special device communication instructions the CPU may have.
+
+
+CACHE COHERENCY
+---------------
+
+Life isn't quite as simple as it may appear above, however: for while the
+caches are expected to be coherent, there's no guarantee that that coherency
+will be ordered. This means that whilst changes made on one CPU will
+eventually become visible on all CPUs, there's no guarantee that they will
+become apparent in the same order on those other CPUs.
+
+
+Consider dealing with a system that has a pair of CPUs (1 & 2), each of which has
+a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
+
+ :
+ : +--------+
+ : +---------+ | |
+ +--------+ : +--->| Cache A |<------->| |
+ | | : | +---------+ | |
+ | CPU 1 |<---+ | |
+ | | : | +---------+ | |
+ +--------+ : +--->| Cache B |<------->| |
+ : +---------+ | |
+ : | Memory |
+ : +---------+ | System |
+ +--------+ : +--->| Cache C |<------->| |
+ | | : | +---------+ | |
+ | CPU 2 |<---+ | |
+ | | : | +---------+ | |
+ +--------+ : +--->| Cache D |<------->| |
+ : +---------+ | |
+ : +--------+
+ :
+
+Imagine the system has the following properties:
+
+ (*) an odd-numbered cache line may be in cache A, cache C or it may still be
+ resident in memory;
+
+ (*) an even-numbered cache line may be in cache B, cache D or it may still be
+ resident in memory;
+
+ (*) whilst the CPU core is interrogating one cache, the other cache may be
+ making use of the bus to access the rest of the system - perhaps to
+ displace a dirty cacheline or to do a speculative load;
+
+ (*) each cache has a queue of operations that need to be applied to that cache
+ to maintain coherency with the rest of the system;
+
+ (*) the coherency queue is not flushed by normal loads to lines already
+ present in the cache, even though the contents of the queue may
+     potentially affect those loads.
+
+Imagine, then, that two writes are made on the first CPU, with a write barrier
+between them to guarantee that they will appear to reach that CPU's caches in
+the requisite order:
+
+    CPU 1           CPU 2           COMMENT
+    =============== =============== =======================================
+                                    u == 0, v == 1 and p == &u, q == &u
+    v = 2;
+    smp_wmb();                      Make sure change to v visible before
+                                     change to p
+    <A:modify v=2>                  v is now in cache A exclusively
+    p = &v;
+    <B:modify p=&v>                 p is now in cache B exclusively
+
+The write memory barrier forces the other CPUs in the system to perceive that
+the local CPU's caches have apparently been updated in the correct order. But
+now imagine that the second CPU wants to read those values:
+
+ CPU 1 CPU 2 COMMENT
+ =============== =============== =======================================
+ ...
+ q = p;
+ x = *q;
+
+The above pair of reads may then fail to happen in the expected order, as the
+cacheline holding p may get updated in one of the second CPU's caches whilst
+the update to the cacheline holding v is delayed in the other of the second
+CPU's caches by some other cache event:
+
+    CPU 1           CPU 2           COMMENT
+    =============== =============== =======================================
+                                    u == 0, v == 1 and p == &u, q == &u
+    v = 2;
+    smp_wmb();
+    <A:modify v=2>  <C:busy>
+                    <C:queue v=2>
+    p = &v;         q = p;
+                    <D:request p>
+    <B:modify p=&v> <D:commit p=&v>
+                    <D:read p>
+                    x = *q;
+                    <C:read *q>     Reads from v before v updated in cache
+                    <C:unbusy>
+                    <C:commit v=2>
+
+Basically, whilst both cachelines will be updated on CPU 2 eventually, there's
+no guarantee that, without intervention, the order of update will be the same
+as that committed on CPU 1.
+
+
+To intervene, we need to interpolate a data dependency barrier or a read
+barrier between the loads. This will force the cache to commit its coherency
+queue before processing any further requests:
+
+    CPU 1           CPU 2           COMMENT
+    =============== =============== =======================================
+                                    u == 0, v == 1 and p == &u, q == &u
+    v = 2;
+    smp_wmb();
+    <A:modify v=2>  <C:busy>
+                    <C:queue v=2>
+    p = &v;         q = p;
+                    <D:request p>
+    <B:modify p=&v> <D:commit p=&v>
+                    <D:read p>
+                    smp_read_barrier_depends()
+                    <C:unbusy>
+                    <C:commit v=2>
+                    x = *q;
+                    <C:read *q>     Reads from v after v updated in cache
+
+
+This sort of problem can be encountered on DEC Alpha processors as they have a
+split cache that improves performance by making better use of the data bus.
+Whilst most CPUs do imply a data dependency barrier on the read when a memory
+access depends on a read, not all do, so it may not be relied on.
+
+Other CPUs may also have split caches, but must coordinate between the various
+cachelets for normal memory accesses. The semantics of the Alpha removes the
+need for coordination in the absence of memory barriers.
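+
+As a rough sketch of the pattern in question (the variable and structure
+names below are invented for illustration), the writer publishes its data
+before the pointer, and the reader orders the dependent load with a data
+dependency barrier:
+
+        /* CPU 1: publish the data, then the pointer to it */
+        newp->data = 42;
+        smp_wmb();
+        global_ptr = newp;
+
+        /* CPU 2: read the pointer, then the data it points to */
+        p = global_ptr;
+        smp_read_barrier_depends();
+        d = p->data;
+
+On most CPUs the smp_read_barrier_depends() is a no-op; on the Alpha it
+forces the second cache to process its coherency queue first.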
+
+
+CACHE COHERENCY VS DMA
+----------------------
+
+Not all systems maintain cache coherency with respect to devices doing DMA. In
+such cases, a device attempting DMA may obtain stale data from RAM because
+dirty cache lines may be resident in the caches of various CPUs, and may not
+have been written back to RAM yet. To deal with this, the appropriate part of
+the kernel must flush the overlapping bits of cache on each CPU (and maybe
+invalidate them as well).
+
+In addition, the data DMA'd to RAM by a device may be overwritten by dirty
+cache lines being written back to RAM from a CPU's cache after the device has
+installed its own data, or cache lines present in a CPU's cache may simply
+obscure the fact that RAM has been updated, until such time as the
+cacheline is discarded from the CPU's cache and reloaded. To deal with this,
+the appropriate part of the kernel must invalidate the overlapping bits of the
+cache on each CPU.
+
+See Documentation/cachetlb.txt for more information on cache management.
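+
+As a hedged illustration of where this handling lives in practice (dev, buf,
+len and bus are made-up names), a driver does not normally flush or
+invalidate caches by hand; the streaming DMA-mapping API does it, where
+needed, as part of mapping and unmapping the buffer:
+
+        /* CPU -> device: dirty cache lines covering buf get flushed */
+        bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+        /* ... point the device at bus and let it run ... */
+        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
+
+        /* device -> CPU: stale cache lines covering buf get invalidated */
+        bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+        /* ... wait for the device to finish writing ... */
+        dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);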
+
+
+CACHE COHERENCY VS MMIO
+-----------------------
+
+Memory mapped I/O usually takes place through memory locations that are part of
+a window in the CPU's memory space that has different properties assigned than
+the usual RAM-directed window.
+
+Amongst these properties is usually the fact that such accesses bypass the
+caching entirely and go directly to the device buses. This means MMIO accesses
+may, in effect, overtake accesses to cached memory that were emitted earlier.
+A memory barrier isn't sufficient in such a case, but rather the cache must be
+flushed between the cached memory write and the MMIO access if the two are in
+any way dependent.
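+
+A hedged sketch of the kind of sequence that needs this care (dev, ring,
+ring_bus, buf_bus, RING_GO and doorbell are all invented names): the
+descriptor written through the cache must reach RAM before the MMIO write
+tells the device to go and read it:
+
+        ring->addr = buf_bus;           /* lands in the CPU's cache */
+        dma_sync_single_for_device(dev, ring_bus, sizeof(*ring),
+                                   DMA_TO_DEVICE);
+                                        /* flush it out to RAM */
+        writel(RING_GO, doorbell);      /* MMIO: the device now reads RAM */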
+
+
+=========================
+THE THINGS CPUS GET UP TO
+=========================
+
+A programmer might take it for granted that the CPU will perform memory
+operations in exactly the order specified, so that if a CPU is, for example,
+given the following piece of code to execute:
+
+ a = *A;
+ *B = b;
+ c = *C;
+ d = *D;
+ *E = e;
+
+They would then expect that the CPU will complete the memory operation for each
+instruction before moving on to the next one, leading to a definite sequence of
+operations as seen by external observers in the system:
+
+ LOAD *A, STORE *B, LOAD *C, LOAD *D, STORE *E.
+
+
+Reality is, of course, much messier. With many CPUs and compilers, the above
+assumption doesn't hold because:
+
+ (*) loads are more likely to need to be completed immediately to permit
+ execution progress, whereas stores can often be deferred without a
+ problem;
+
+ (*) loads may be done speculatively, and the result discarded should it prove
+ to have been unnecessary;
+
+ (*) loads may be done speculatively, leading to the result having been
+ fetched at the wrong time in the expected sequence of events;
+
+ (*) the order of the memory accesses may be rearranged to promote better use
+ of the CPU buses and caches;
+
+ (*) loads and stores may be combined to improve performance when talking to
+ memory or I/O hardware that can do batched accesses of adjacent locations,
+ thus cutting down on transaction setup costs (memory and PCI devices may
+ both be able to do this); and
+
+ (*) the CPU's data cache may affect the ordering, and whilst cache-coherency
+ mechanisms may alleviate this - once the store has actually hit the cache
+ - there's no guarantee that the coherency management will be propagated in
+ order to other CPUs.
+
+So what another CPU, say, might actually observe from the above piece of code
+is:
+
+ LOAD *A, ..., LOAD {*C,*D}, STORE *E, STORE *B
+
+ (Where "LOAD {*C,*D}" is a combined load)
+
+
+However, it is guaranteed that a CPU will be self-consistent: it will see its
+_own_ accesses appear to be correctly ordered, without the need for a memory
+barrier. For instance with the following code:
+
+ U = *A;
+ *A = V;
+ *A = W;
+ X = *A;
+ *A = Y;
+ Z = *A;
+
+and assuming no intervention by an external influence, it can be assumed that
+the final result will appear to be:
+
+ U == the original value of *A
+ X == W
+ Z == Y
+ *A == Y
+
+The code above may cause the CPU to generate the full sequence of memory
+accesses:
+
+ U=LOAD *A, STORE *A=V, STORE *A=W, X=LOAD *A, STORE *A=Y, Z=LOAD *A
+
+in that order, but, without intervention, the sequence may have almost any
+combination of elements combined or discarded, provided the program's view of
+the world remains consistent.
+
+The compiler may also combine, discard or defer elements of the sequence before
+the CPU even sees them.
+
+For instance:
+
+ *A = V;
+ *A = W;
+
+may be reduced to:
+
+ *A = W;
+
+since, without a write barrier, it can be assumed that the effect of the
+storage of V to *A is lost. Similarly:
+
+ *A = Y;
+ Z = *A;
+
+may, without a memory barrier, be reduced to:
+
+ *A = Y;
+ Z = Y;
+
+and the LOAD operation need never appear outside of the CPU.
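+
+As a rough sketch, a compiler barrier (or making the access volatile) forces
+the compiler to really perform the load:
+
+        *A = Y;
+        barrier();      /* the compiler may no longer assume *A still holds Y */
+        Z = *A;         /* so a real LOAD must be emitted here */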
+
+
+AND THEN THERE'S THE ALPHA
+--------------------------
+
+The DEC Alpha CPU is one of the most relaxed CPUs there is. Not only that,
+some versions of the Alpha CPU have a split data cache, permitting them to have
+two semantically related cache lines updating at separate times. This is where
+the data dependency barrier really becomes necessary as this synchronises both
+caches with the memory coherence system, thus making it seem like the pointer
+change and the new data become visible in the right order.
+
+The Alpha defines the Linux kernel's memory barrier model.
+
+See the subsection on "Cache Coherency" above.
+
+
+==========
+REFERENCES
+==========
+
+Alpha AXP Architecture Reference Manual, Second Edition (Sites & Witek,
+Digital Press)
+ Chapter 5.2: Physical Address Space Characteristics
+ Chapter 5.4: Caches and Write Buffers
+ Chapter 5.5: Data Sharing
+ Chapter 5.6: Read/Write Ordering
+
+AMD64 Architecture Programmer's Manual Volume 2: System Programming
+ Chapter 7.1: Memory-Access Ordering
+ Chapter 7.4: Buffering and Combining Memory Writes
+
+IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+System Programming Guide
+ Chapter 7.1: Locked Atomic Operations
+ Chapter 7.2: Memory Ordering
+ Chapter 7.4: Serializing Instructions
+
+The SPARC Architecture Manual, Version 9
+ Chapter 8: Memory Models
+ Appendix D: Formal Specification of the Memory Models
+ Appendix J: Programming with the Memory Models
+
+UltraSPARC Programmer Reference Manual
+ Chapter 5: Memory Accesses and Cacheability
+ Chapter 15: Sparc-V9 Memory Models
+
+UltraSPARC III Cu User's Manual
+ Chapter 9: Memory Models
+
+UltraSPARC IIIi Processor User's Manual
+ Chapter 8: Memory Models
+
+UltraSPARC Architecture 2005
+ Chapter 9: Memory
+ Appendix D: Formal Specifications of the Memory Models
+
+UltraSPARC T1 Supplement to the UltraSPARC Architecture 2005
+ Chapter 8: Memory Models
+ Appendix F: Caches and Cache Coherency
+
+Solaris Internals, Core Kernel Architecture, p63-68:
+ Chapter 3.3: Hardware Considerations for Locks and
+ Synchronization
+
+Unix Systems for Modern Architectures, Symmetric Multiprocessing and Caching
+for Kernel Programmers:
+ Chapter 13: Other Memory Models
+
+Intel Itanium Architecture Software Developer's Manual: Volume 1:
+ Section 2.6: Speculation
+ Section 4.4: Memory Access
diff --git a/trunk/Documentation/networking/bcm43xx.txt b/trunk/Documentation/networking/bcm43xx.txt
new file mode 100644
index 000000000000..28541d2bee1e
--- /dev/null
+++ b/trunk/Documentation/networking/bcm43xx.txt
@@ -0,0 +1,36 @@
+
+ BCM43xx Linux Driver Project
+ ============================
+
+About this software
+-------------------
+
+The goal of this project is to develop a Linux driver for Broadcom
+BCM43xx chips, based on the specification at
+http://bcm-specs.sipsolutions.net/
+
+The project page is http://bcm43xx.berlios.de/
+
+
+Requirements
+------------
+
+1) Linux Kernel 2.6.16 or later
+ http://www.kernel.org/
+
+ You may want to configure your kernel with:
+
+ CONFIG_DEBUG_FS (optional):
+ -> Kernel hacking
+ -> Debug Filesystem
+
+2) SoftMAC IEEE 802.11 Networking Stack extension and patched ieee80211
+ modules:
+ http://softmac.sipsolutions.net/
+
+3) Firmware Files
+
+ Please try fwcutter. Fwcutter can extract the firmware from various
+ binary driver files. It supports driver files from Windows, MacOS and
+ Linux. You can get fwcutter from http://bcm43xx.berlios.de/.
+ Also, fwcutter comes with a README file for further instructions.
diff --git a/trunk/arch/alpha/kernel/alpha_ksyms.c b/trunk/arch/alpha/kernel/alpha_ksyms.c
index 1898ea79d0e2..9d6186d50245 100644
--- a/trunk/arch/alpha/kernel/alpha_ksyms.c
+++ b/trunk/arch/alpha/kernel/alpha_ksyms.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_ALPHA_IRONGATE
EXPORT_SYMBOL(irongate_ioremap);
EXPORT_SYMBOL(irongate_iounmap);
diff --git a/trunk/arch/alpha/kernel/core_marvel.c b/trunk/arch/alpha/kernel/core_marvel.c
index 44866cb26a80..7f6a98455e74 100644
--- a/trunk/arch/alpha/kernel/core_marvel.c
+++ b/trunk/arch/alpha/kernel/core_marvel.c
@@ -435,7 +435,7 @@ marvel_specify_io7(char *str)
str = pchar;
} while(*str);
- return 0;
+ return 1;
}
__setup("io7=", marvel_specify_io7);
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index ba46d779ede7..e91db542eb01 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -839,6 +839,8 @@ source "drivers/misc/Kconfig"
source "drivers/mfd/Kconfig"
+source "drivers/leds/Kconfig"
+
source "drivers/media/Kconfig"
source "drivers/video/Kconfig"
diff --git a/trunk/arch/arm/common/sharpsl_pm.c b/trunk/arch/arm/common/sharpsl_pm.c
index 978d32e82d39..3cd8c9ee4510 100644
--- a/trunk/arch/arm/common/sharpsl_pm.c
+++ b/trunk/arch/arm/common/sharpsl_pm.c
@@ -22,6 +22,7 @@
#include
#include
#include
+#include <linux/leds.h>
#include
#include
@@ -75,6 +76,7 @@ static void sharpsl_battery_thread(void *private_);
struct sharpsl_pm_status sharpsl_pm;
DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
static int get_percentage(int voltage)
@@ -190,10 +192,10 @@ void sharpsl_pm_led(int val)
dev_err(sharpsl_pm.dev, "Charging Error!\n");
} else if (val == SHARPSL_LED_ON) {
dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
-
+ led_trigger_event(sharpsl_charge_led_trigger, LED_FULL);
} else {
dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
-
+ led_trigger_event(sharpsl_charge_led_trigger, LED_OFF);
}
}
@@ -786,6 +788,8 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
init_timer(&sharpsl_pm.chrg_full_timer);
sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+ led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);
+
sharpsl_pm.machinfo->init();
device_create_file(&pdev->dev, &dev_attr_battery_percentage);
@@ -807,6 +811,8 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
+ led_trigger_unregister_simple(sharpsl_charge_led_trigger);
+
sharpsl_pm.machinfo->exit();
del_timer_sync(&sharpsl_pm.chrg_full_timer);
diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c
index 489c069e5c3e..1ff75cee4b0d 100644
--- a/trunk/arch/arm/kernel/process.c
+++ b/trunk/arch/arm/kernel/process.c
@@ -474,4 +474,3 @@ unsigned long get_wchan(struct task_struct *p)
} while (count ++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
diff --git a/trunk/arch/arm/mach-pxa/corgi.c b/trunk/arch/arm/mach-pxa/corgi.c
index 68923b1d2b62..d6d726036361 100644
--- a/trunk/arch/arm/mach-pxa/corgi.c
+++ b/trunk/arch/arm/mach-pxa/corgi.c
@@ -141,6 +141,8 @@ struct corgissp_machinfo corgi_ssp_machinfo = {
*/
static struct corgibl_machinfo corgi_bl_machinfo = {
.max_intensity = 0x2f,
+ .default_intensity = 0x1f,
+ .limit_mask = 0x0b,
.set_bl_intensity = corgi_bl_set_intensity,
};
@@ -163,6 +165,14 @@ static struct platform_device corgikbd_device = {
};
+/*
+ * Corgi LEDs
+ */
+static struct platform_device corgiled_device = {
+ .name = "corgi-led",
+ .id = -1,
+};
+
/*
* Corgi Touch Screen Device
*/
@@ -297,6 +307,7 @@ static struct platform_device *devices[] __initdata = {
&corgikbd_device,
&corgibl_device,
&corgits_device,
+ &corgiled_device,
};
static void __init corgi_init(void)
diff --git a/trunk/arch/arm/mach-pxa/spitz.c b/trunk/arch/arm/mach-pxa/spitz.c
index 0dbb079ecd25..19b372df544a 100644
--- a/trunk/arch/arm/mach-pxa/spitz.c
+++ b/trunk/arch/arm/mach-pxa/spitz.c
@@ -220,6 +220,8 @@ struct corgissp_machinfo spitz_ssp_machinfo = {
* Spitz Backlight Device
*/
static struct corgibl_machinfo spitz_bl_machinfo = {
+ .default_intensity = 0x1f,
+ .limit_mask = 0x0b,
.max_intensity = 0x2f,
};
@@ -241,6 +243,14 @@ static struct platform_device spitzkbd_device = {
};
+/*
+ * Spitz LEDs
+ */
+static struct platform_device spitzled_device = {
+ .name = "spitz-led",
+ .id = -1,
+};
+
/*
* Spitz Touch Screen Device
*/
@@ -418,6 +428,7 @@ static struct platform_device *devices[] __initdata = {
&spitzkbd_device,
&spitzts_device,
&spitzbl_device,
+ &spitzled_device,
};
static void __init common_init(void)
diff --git a/trunk/arch/arm/mach-pxa/tosa.c b/trunk/arch/arm/mach-pxa/tosa.c
index 66ec71756d0f..76c0e7f0a219 100644
--- a/trunk/arch/arm/mach-pxa/tosa.c
+++ b/trunk/arch/arm/mach-pxa/tosa.c
@@ -251,10 +251,19 @@ static struct platform_device tosakbd_device = {
.id = -1,
};
+/*
+ * Tosa LEDs
+ */
+static struct platform_device tosaled_device = {
+ .name = "tosa-led",
+ .id = -1,
+};
+
static struct platform_device *devices[] __initdata = {
&tosascoop_device,
&tosascoop_jc_device,
&tosakbd_device,
+ &tosaled_device,
};
static void __init tosa_init(void)
diff --git a/trunk/arch/arm26/kernel/armksyms.c b/trunk/arch/arm26/kernel/armksyms.c
index 811a6376c624..a6a1b3373444 100644
--- a/trunk/arch/arm26/kernel/armksyms.c
+++ b/trunk/arch/arm26/kernel/armksyms.c
@@ -212,8 +212,6 @@ EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_exit);
EXPORT_SYMBOL(sys_wait4);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(kernel_flag);
#endif
diff --git a/trunk/arch/frv/kernel/frv_ksyms.c b/trunk/arch/frv/kernel/frv_ksyms.c
index aa6b7d0a2109..07c8ffa0dd39 100644
--- a/trunk/arch/frv/kernel/frv_ksyms.c
+++ b/trunk/arch/frv/kernel/frv_ksyms.c
@@ -79,8 +79,6 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
-EXPORT_SYMBOL(get_wchan);
-
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
EXPORT_SYMBOL(atomic_test_and_OR_mask);
diff --git a/trunk/arch/h8300/kernel/h8300_ksyms.c b/trunk/arch/h8300/kernel/h8300_ksyms.c
index 69d6ad32d56c..b6cd78c972bb 100644
--- a/trunk/arch/h8300/kernel/h8300_ksyms.c
+++ b/trunk/arch/h8300/kernel/h8300_ksyms.c
@@ -55,8 +55,6 @@ EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(get_wchan);
-
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
diff --git a/trunk/arch/i386/kernel/apic.c b/trunk/arch/i386/kernel/apic.c
index eb5279d23b7f..6273bf74c203 100644
--- a/trunk/arch/i386/kernel/apic.c
+++ b/trunk/arch/i386/kernel/apic.c
@@ -415,6 +415,7 @@ void __init init_bsp_APIC(void)
void __devinit setup_local_APIC(void)
{
unsigned long oldvalue, value, ver, maxlvt;
+ int i, j;
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (esr_disable) {
@@ -451,6 +452,25 @@ void __devinit setup_local_APIC(void)
value &= ~APIC_TPRI_MASK;
apic_write_around(APIC_TASKPRI, value);
+ /*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now CPU has serviced that pending interrupt and
+ * it might not have done the ack_APIC_irq() because it thought,
+ * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+ * does not clear the ISR bit and cpu thinks it has already serivced
+ * the interrupt. Hence a vector might get locked. It was noticed
+ * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+ */
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+			if (value & (1<<j))
+				ack_APIC_irq();
*
- * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O APIC code.
- * In particular, we now have separate handlers for edge
- * and level triggered interrupts.
- * 00/10/27 Asit Mallick, Goutham Rao IRQ vector allocation
- * PCI to vector mapping, shared PCI interrupts.
- * 00/10/27 D. Mosberger Document things a bit more to make them more understandable.
- * Clean up much of the old IOSAPIC cruft.
- * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts and fixes for
- * ACPI S5(SoftOff) support.
+ * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O
+ * APIC code. In particular, we now have separate
+ * handlers for edge and level triggered
+ * interrupts.
+ * 00/10/27 Asit Mallick, Goutham Rao IRQ vector
+ * allocation PCI to vector mapping, shared PCI
+ * interrupts.
+ * 00/10/27 D. Mosberger Document things a bit more to make them more
+ * understandable. Clean up much of the old
+ * IOSAPIC cruft.
+ * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts
+ * and fixes for ACPI S5(SoftOff) support.
* 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT
- * 02/01/07 E. Focht Redirectable interrupt vectors in
- * iosapic_set_affinity(), initializations for
- * /proc/irq/#/smp_affinity
+ * 02/01/07 E. Focht Redirectable interrupt
+ * vectors in iosapic_set_affinity(),
+ * initializations for /proc/irq/#/smp_affinity
* 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing.
* 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq
- * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to IOSAPIC mapping
- * error
+ * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to
+ * IOSAPIC mapping error
* 02/07/29 T. Kochi Allocate interrupt vectors dynamically
- * 02/08/04 T. Kochi Cleaned up terminology (irq, global system interrupt, vector, etc.)
- * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's pci_irq code.
+ * 02/08/04 T. Kochi Cleaned up terminology (irq, global system
+ * interrupt, vector, etc.)
+ * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's
+ * pci_irq code.
* 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC.
- * Remove iosapic_address & gsi_base from external interfaces.
- * Rationalize __init/__devinit attributes.
+ * Remove iosapic_address & gsi_base from
+ * external interfaces. Rationalize
+ * __init/__devinit attributes.
* 04/12/04 Ashok Raj Intel Corporation 2004
- * Updated to work with irq migration necessary for CPU Hotplug
+ * Updated to work with irq migration necessary
+ * for CPU Hotplug
*/
/*
- * Here is what the interrupt logic between a PCI device and the kernel looks like:
+ * Here is what the interrupt logic between a PCI device and the kernel looks
+ * like:
*
- * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, INTD). The
- * device is uniquely identified by its bus--, and slot-number (the function
- * number does not matter here because all functions share the same interrupt
- * lines).
+ * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC,
+ * INTD). The device is uniquely identified by its bus-, and slot-number
+ * (the function number does not matter here because all functions share
+ * the same interrupt lines).
*
- * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC controller.
- * Multiple interrupt lines may have to share the same IOSAPIC pin (if they're level
- * triggered and use the same polarity). Each interrupt line has a unique Global
- * System Interrupt (GSI) number which can be calculated as the sum of the controller's
- * base GSI number and the IOSAPIC pin number to which the line connects.
+ * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC
+ * controller. Multiple interrupt lines may have to share the same
+ * IOSAPIC pin (if they're level triggered and use the same polarity).
+ * Each interrupt line has a unique Global System Interrupt (GSI) number
+ * which can be calculated as the sum of the controller's base GSI number
+ * and the IOSAPIC pin number to which the line connects.
*
- * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the IOSAPIC pin
- * into the IA-64 interrupt vector. This interrupt vector is then sent to the CPU.
+ * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the
+ * IOSAPIC pin into the IA-64 interrupt vector. This interrupt vector is then
+ * sent to the CPU.
*
- * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is used as
- * architecture-independent interrupt handling mechanism in Linux. As an
- * IRQ is a number, we have to have IA-64 interrupt vector number <-> IRQ number
- * mapping. On smaller systems, we use one-to-one mapping between IA-64 vector and
- * IRQ. A platform can implement platform_irq_to_vector(irq) and
+ * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is
+ * used as architecture-independent interrupt handling mechanism in Linux.
+ * As an IRQ is a number, we have to have
+ * IA-64 interrupt vector number <-> IRQ number mapping. On smaller
+ * systems, we use one-to-one mapping between IA-64 vector and IRQ. A
+ * platform can implement platform_irq_to_vector(irq) and
* platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
* Please see also include/asm-ia64/hw_irq.h for those APIs.
*
@@ -64,9 +75,9 @@
*
* PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
*
- * Note: The term "IRQ" is loosely used everywhere in Linux kernel to describe interrupts.
- * Now we use "IRQ" only for Linux IRQ's. ISA IRQ (isa_irq) is the only exception in this
- * source code.
+ * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
+ * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+ * (isa_irq) is the only exception in this source code.
*/
#include
@@ -90,7 +101,6 @@
#include
#include
-
#undef DEBUG_INTERRUPT_ROUTING
#ifdef DEBUG_INTERRUPT_ROUTING
@@ -99,36 +109,46 @@
#define DBG(fmt...)
#endif
-#define NR_PREALLOCATE_RTE_ENTRIES (PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define NR_PREALLOCATE_RTE_ENTRIES \
+ (PAGE_SIZE / sizeof(struct iosapic_rte_info))
#define RTE_PREALLOCATED (1)
static DEFINE_SPINLOCK(iosapic_lock);
-/* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
+/*
+ * These tables map IA-64 vectors to the IOSAPIC pin that generates this
+ * vector.
+ */
struct iosapic_rte_info {
- struct list_head rte_list; /* node in list of RTEs sharing the same vector */
+ struct list_head rte_list; /* node in list of RTEs sharing the
+ * same vector */
char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
+ unsigned int gsi_base; /* first GSI assigned to this
+ * IOSAPIC */
char rte_index; /* IOSAPIC RTE index */
int refcnt; /* reference counter */
unsigned int flags; /* flags */
} ____cacheline_aligned;
static struct iosapic_intr_info {
- struct list_head rtes; /* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+ struct list_head rtes; /* RTEs using this vector (empty =>
+ * not an IOSAPIC interrupt) */
int count; /* # of RTEs that shares this vector */
- u32 low32; /* current value of low word of Redirection table entry */
+ u32 low32; /* current value of low word of
+ * Redirection table entry */
unsigned int dest; /* destination CPU physical ID */
unsigned char dmode : 3; /* delivery mode (see iosapic.h) */
- unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */
+ unsigned char polarity: 1; /* interrupt polarity
+ * (see iosapic.h) */
unsigned char trigger : 1; /* trigger mode (see iosapic.h) */
} iosapic_intr_info[IA64_NUM_VECTORS];
static struct iosapic {
char __iomem *addr; /* base address of IOSAPIC */
- unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */
- unsigned short num_rte; /* number of RTE in this IOSAPIC */
+ unsigned int gsi_base; /* first GSI assigned to this
+ * IOSAPIC */
+ unsigned short num_rte; /* # of RTEs on this IOSAPIC */
int rtes_inuse; /* # of RTEs in use on this IOSAPIC */
#ifdef CONFIG_NUMA
unsigned short node; /* numa node association via pxm */
@@ -149,7 +169,8 @@ find_iosapic (unsigned int gsi)
int i;
for (i = 0; i < NR_IOSAPICS; i++) {
- if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte)
+ if ((unsigned) (gsi - iosapic_lists[i].gsi_base) <
+ iosapic_lists[i].num_rte)
return i;
}
@@ -162,7 +183,8 @@ _gsi_to_vector (unsigned int gsi)
struct iosapic_intr_info *info;
struct iosapic_rte_info *rte;
- for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
+ for (info = iosapic_intr_info; info <
+ iosapic_intr_info + IA64_NUM_VECTORS; ++info)
list_for_each_entry(rte, &info->rtes, rte_list)
if (rte->gsi_base + rte->rte_index == gsi)
return info - iosapic_intr_info;
@@ -185,8 +207,8 @@ gsi_to_irq (unsigned int gsi)
unsigned long flags;
int irq;
/*
- * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq
- * numbers...
+ * XXX fix me: this assumes an identity mapping between IA-64 vector
+ * and Linux irq numbers...
*/
spin_lock_irqsave(&iosapic_lock, flags);
{
@@ -197,7 +219,8 @@ gsi_to_irq (unsigned int gsi)
return irq;
}
-static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi,
+ unsigned int vec)
{
struct iosapic_rte_info *rte;
@@ -237,7 +260,9 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == vector) {
- set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
+ set_irq_affinity_info(irq,
+ (int)(dest & 0xffff),
+ redir);
break;
}
}
@@ -259,7 +284,7 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
}
static void
-nop (unsigned int vector)
+nop (unsigned int irq)
{
/* do nothing... */
}
@@ -281,7 +306,8 @@ mask_irq (unsigned int irq)
{
/* set only the mask bit */
low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+ rte_list) {
addr = rte->addr;
rte_index = rte->rte_index;
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -306,7 +332,8 @@ unmask_irq (unsigned int irq)
spin_lock_irqsave(&iosapic_lock, flags);
{
low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+ rte_list) {
addr = rte->addr;
rte_index = rte->rte_index;
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
@@ -346,21 +373,25 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
spin_lock_irqsave(&iosapic_lock, flags);
{
- low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
+ low32 = iosapic_intr_info[vec].low32 &
+ ~(7 << IOSAPIC_DELIVERY_SHIFT);
if (redir)
/* change delivery mode to lowest priority */
- low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+ low32 |= (IOSAPIC_LOWEST_PRIORITY <<
+ IOSAPIC_DELIVERY_SHIFT);
else
/* change delivery mode to fixed */
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
iosapic_intr_info[vec].low32 = low32;
iosapic_intr_info[vec].dest = dest;
- list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+ list_for_each_entry(rte, &iosapic_intr_info[vec].rtes,
+ rte_list) {
addr = rte->addr;
rte_index = rte->rte_index;
- iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+ iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
+ high32);
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
}
@@ -433,7 +464,8 @@ iosapic_ack_edge_irq (unsigned int irq)
* interrupt for real. This prevents IRQ storms from unhandled
* devices.
*/
- if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) == (IRQ_PENDING|IRQ_DISABLED))
+ if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
+ (IRQ_PENDING|IRQ_DISABLED))
mask_irq(irq);
}
@@ -467,7 +499,8 @@ iosapic_version (char __iomem *addr)
return iosapic_read(addr, IOSAPIC_VERSION);
}
-static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+static int iosapic_find_sharable_vector (unsigned long trigger,
+ unsigned long pol)
{
int i, vector = -1, min_count = -1;
struct iosapic_intr_info *info;
@@ -482,7 +515,8 @@ static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long po
for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
info = &iosapic_intr_info[i];
if (info->trigger == trigger && info->polarity == pol &&
- (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+ (info->dmode == IOSAPIC_FIXED || info->dmode ==
+ IOSAPIC_LOWEST_PRIORITY)) {
if (min_count == -1 || info->count < min_count) {
vector = i;
min_count = info->count;
@@ -506,12 +540,15 @@ iosapic_reassign_vector (int vector)
new_vector = assign_irq_vector(AUTO_ASSIGN);
if (new_vector < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__);
- printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
+ printk(KERN_INFO "Reassigning vector %d to %d\n",
+ vector, new_vector);
memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
sizeof(struct iosapic_intr_info));
INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
- list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
- memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+ list_move(iosapic_intr_info[vector].rtes.next,
+ &iosapic_intr_info[new_vector].rtes);
+ memset(&iosapic_intr_info[vector], 0,
+ sizeof(struct iosapic_intr_info));
iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
}
@@ -524,7 +561,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
int preallocated = 0;
if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
- rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+ rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
+ NR_PREALLOCATE_RTE_ENTRIES);
if (!rte)
return NULL;
for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
@@ -532,7 +570,8 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
}
if (!list_empty(&free_rte_list)) {
- rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+ rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
+ rte_list);
list_del(&rte->rte_list);
preallocated++;
} else {
@@ -575,7 +614,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
index = find_iosapic(gsi);
if (index < 0) {
- printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi);
+ printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n",
+ __FUNCTION__, gsi);
return -ENODEV;
}
@@ -586,7 +626,8 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
if (!rte) {
rte = iosapic_alloc_rte();
if (!rte) {
- printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: cannot allocate memory\n",
+ __FUNCTION__);
return -ENOMEM;
}
@@ -602,7 +643,9 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
else if (vector_is_shared(vector)) {
struct iosapic_intr_info *info = &iosapic_intr_info[vector];
if (info->trigger != trigger || info->polarity != polarity) {
- printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+ printk (KERN_WARNING
+ "%s: cannot override the interrupt\n",
+ __FUNCTION__);
return -EINVAL;
}
}
@@ -619,8 +662,10 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
idesc = irq_descp(vector);
if (idesc->handler != irq_type) {
if (idesc->handler != &no_irq_type)
- printk(KERN_WARNING "%s: changing vector %d from %s to %s\n",
- __FUNCTION__, vector, idesc->handler->typename, irq_type->typename);
+ printk(KERN_WARNING
+ "%s: changing vector %d from %s to %s\n",
+ __FUNCTION__, vector,
+ idesc->handler->typename, irq_type->typename);
idesc->handler = irq_type;
}
return 0;
@@ -681,7 +726,7 @@ get_target_cpu (unsigned int gsi, int vector)
if (!num_cpus)
goto skip_numa_setup;
- /* Use vector assigment to distribute across cpus in node */
+ /* Use vector assignment to distribute across cpus in node */
cpu_index = vector % num_cpus;
for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
@@ -703,7 +748,7 @@ get_target_cpu (unsigned int gsi, int vector)
} while (!cpu_online(cpu));
return cpu_physical_id(cpu);
-#else
+#else /* CONFIG_SMP */
return cpu_physical_id(smp_processor_id());
#endif
}
@@ -755,7 +800,8 @@ iosapic_register_intr (unsigned int gsi,
if (list_empty(&iosapic_intr_info[vector].rtes))
free_irq_vector(vector);
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+ spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ flags);
goto again;
}
@@ -764,7 +810,8 @@ iosapic_register_intr (unsigned int gsi,
polarity, trigger);
if (err < 0) {
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+ spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ flags);
return err;
}
@@ -806,7 +853,8 @@ iosapic_unregister_intr (unsigned int gsi)
*/
irq = gsi_to_irq(gsi);
if (irq < 0) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+ printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
+ gsi);
WARN_ON(1);
return;
}
@@ -817,7 +865,9 @@ iosapic_unregister_intr (unsigned int gsi)
spin_lock(&iosapic_lock);
{
if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
- printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
+ printk(KERN_ERR
+ "iosapic_unregister_intr(%u) unbalanced\n",
+ gsi);
WARN_ON(1);
goto out;
}
@@ -827,7 +877,8 @@ iosapic_unregister_intr (unsigned int gsi)
/* Mask the interrupt */
low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
- iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
+ iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
+ low32);
/* Remove the rte entry from the list */
list_del(&rte->rte_list);
@@ -840,7 +891,9 @@ iosapic_unregister_intr (unsigned int gsi)
trigger = iosapic_intr_info[vector].trigger;
polarity = iosapic_intr_info[vector].polarity;
dest = iosapic_intr_info[vector].dest;
- printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+ printk(KERN_INFO
+ "GSI %u (%s, %s) -> CPU %d (0x%04x)"
+ " vector %d unregistered\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, vector);
@@ -853,12 +906,15 @@ iosapic_unregister_intr (unsigned int gsi)
idesc->handler = &no_irq_type;
/* Clear the interrupt information */
- memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+ memset(&iosapic_intr_info[vector], 0,
+ sizeof(struct iosapic_intr_info));
iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
if (idesc->action) {
- printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
+ printk(KERN_ERR
+			       "interrupt handlers still exist on "
+ "IRQ %u\n", irq);
WARN_ON(1);
}
@@ -873,7 +929,6 @@ iosapic_unregister_intr (unsigned int gsi)
/*
* ACPI calls this when it finds an entry for a platform interrupt.
- * Note that the irq_base and IOSAPIC address must be set in iosapic_init().
*/
int __init
iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
@@ -907,13 +962,16 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
mask = 1;
break;
default:
- printk(KERN_ERR "iosapic_register_platform_irq(): invalid int type 0x%x\n", int_type);
+ printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__,
+ int_type);
return -1;
}
register_intr(gsi, vector, delivery, polarity, trigger);
- printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
+ printk(KERN_INFO
+ "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
+ " vector %d\n",
int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown",
int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
@@ -923,10 +981,8 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
return vector;
}
-
/*
* ACPI calls this when it finds an entry for a legacy ISA IRQ override.
- * Note that the gsi_base and IOSAPIC address must be set in iosapic_init().
*/
void __init
iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
@@ -955,16 +1011,19 @@ iosapic_system_init (int system_pcat_compat)
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
- INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */
+ /* mark as unused */
+ INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
}
pcat_compat = system_pcat_compat;
if (pcat_compat) {
/*
- * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
- * enabled.
+ * Disable the compatibility mode interrupts (8259 style),
+ * needs IN/OUT support enabled.
*/
- printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __FUNCTION__);
+ printk(KERN_INFO
+ "%s: Disabling PC-AT compatible 8259 interrupts\n",
+ __FUNCTION__);
outb(0xff, 0xA1);
outb(0xff, 0x21);
}
@@ -1004,10 +1063,7 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
base = iosapic_lists[index].gsi_base;
end = base + iosapic_lists[index].num_rte - 1;
- if (gsi_base < base && gsi_end < base)
- continue;/* OK */
-
- if (gsi_base > end && gsi_end > end)
+ if (gsi_end < base || end < gsi_base)
continue; /* OK */
return -EBUSY;
@@ -1053,12 +1109,14 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
if ((gsi_base == 0) && pcat_compat) {
/*
- * Map the legacy ISA devices into the IOSAPIC data. Some of these may
- * get reprogrammed later on with data from the ACPI Interrupt Source
- * Override table.
+ * Map the legacy ISA devices into the IOSAPIC data. Some of
+ * these may get reprogrammed later on with data from the ACPI
+ * Interrupt Source Override table.
*/
for (isa_irq = 0; isa_irq < 16; ++isa_irq)
- iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
+ iosapic_override_isa_irq(isa_irq, isa_irq,
+ IOSAPIC_POL_HIGH,
+ IOSAPIC_EDGE);
}
return 0;
}
@@ -1081,7 +1139,8 @@ iosapic_remove (unsigned int gsi_base)
if (iosapic_lists[index].rtes_inuse) {
err = -EBUSY;
- printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
+ printk(KERN_WARNING
+ "%s: IOSAPIC for GSI base %u is busy\n",
__FUNCTION__, gsi_base);
goto out;
}
diff --git a/trunk/arch/ia64/kernel/vmlinux.lds.S b/trunk/arch/ia64/kernel/vmlinux.lds.S
index 0b9e56dd7f05..783600fe52b2 100644
--- a/trunk/arch/ia64/kernel/vmlinux.lds.S
+++ b/trunk/arch/ia64/kernel/vmlinux.lds.S
@@ -70,6 +70,15 @@ SECTIONS
__stop___ex_table = .;
}
+ /* MCA table */
+ . = ALIGN(16);
+ __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+ {
+ __start___mca_table = .;
+ *(__mca_table)
+ __stop___mca_table = .;
+ }
+
/* Global data */
_data = .;
@@ -130,15 +139,6 @@ SECTIONS
__initcall_end = .;
}
- /* MCA table */
- . = ALIGN(16);
- __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
- {
- __start___mca_table = .;
- *(__mca_table)
- __stop___mca_table = .;
- }
-
.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
diff --git a/trunk/arch/ia64/mm/init.c b/trunk/arch/ia64/mm/init.c
index 2ef1151cde90..cafa8776a53d 100644
--- a/trunk/arch/ia64/mm/init.c
+++ b/trunk/arch/ia64/mm/init.c
@@ -109,6 +109,7 @@ lazy_mmu_prot_update (pte_t pte)
{
unsigned long addr;
struct page *page;
+ unsigned long order;
if (!pte_exec(pte))
return; /* not an executable page... */
@@ -119,7 +120,12 @@ lazy_mmu_prot_update (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + PAGE_SIZE);
+ if (PageCompound(page)) {
+ order = (unsigned long) (page[1].lru.prev);
+ flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
+ }
+ else
+ flush_icache_range(addr, addr + PAGE_SIZE);
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
diff --git a/trunk/arch/ia64/mm/ioremap.c b/trunk/arch/ia64/mm/ioremap.c
index 62328621f99c..643ccc6960ce 100644
--- a/trunk/arch/ia64/mm/ioremap.c
+++ b/trunk/arch/ia64/mm/ioremap.c
@@ -21,12 +21,12 @@ __ioremap (unsigned long offset, unsigned long size)
void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
- if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
- return __ioremap(offset, size);
-
if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
return phys_to_virt(offset);
+ if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+ return __ioremap(offset, size);
+
/*
* Someday this should check ACPI resources so we
* can do the right thing for hot-plugged regions.
diff --git a/trunk/arch/ia64/mm/tlb.c b/trunk/arch/ia64/mm/tlb.c
index 6a4eec9113e8..4dbbca0b5e9c 100644
--- a/trunk/arch/ia64/mm/tlb.c
+++ b/trunk/arch/ia64/mm/tlb.c
@@ -156,17 +156,19 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
nbits = purge.max_bits;
start &= ~((1UL << nbits) - 1);
-# ifdef CONFIG_SMP
- platform_global_tlb_purge(mm, start, end, nbits);
-# else
preempt_disable();
+#ifdef CONFIG_SMP
+ if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+ platform_global_tlb_purge(mm, start, end, nbits);
+ preempt_enable();
+ return;
+ }
+#endif
do {
ia64_ptcl(start, (nbits<<2));
start += (1UL << nbits);
} while (start < end);
preempt_enable();
-# endif
-
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
diff --git a/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 70db21f3df21..d917afa30b27 100644
--- a/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/trunk/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -110,7 +110,11 @@ static int sn_hwperf_geoid_to_cnode(char *location)
if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
return -1;
- for_each_node(cnode) {
+ /*
+ * FIXME: replace with cleaner for_each_XXX macro which addresses
+ * both compute and IO nodes once ACPI3.0 is available.
+ */
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
@@ -605,7 +609,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
if (cpu != SN_HWPERF_ARG_ANY_CPU) {
- if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
+ if (cpu >= NR_CPUS || !cpu_online(cpu)) {
r = -EINVAL;
goto out;
}
diff --git a/trunk/arch/m68k/kernel/m68k_ksyms.c b/trunk/arch/m68k/kernel/m68k_ksyms.c
index 3d7f2000b714..c3319514a85e 100644
--- a/trunk/arch/m68k/kernel/m68k_ksyms.c
+++ b/trunk/arch/m68k/kernel/m68k_ksyms.c
@@ -79,4 +79,3 @@ EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(get_wchan);
diff --git a/trunk/arch/m68knommu/kernel/m68k_ksyms.c b/trunk/arch/m68knommu/kernel/m68k_ksyms.c
index d844c755945a..f9b4ea16c099 100644
--- a/trunk/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/trunk/arch/m68knommu/kernel/m68k_ksyms.c
@@ -57,8 +57,6 @@ EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(get_wchan);
-
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
diff --git a/trunk/arch/mips/kernel/process.c b/trunk/arch/mips/kernel/process.c
index a8f435d82940..c66db5e5ab62 100644
--- a/trunk/arch/mips/kernel/process.c
+++ b/trunk/arch/mips/kernel/process.c
@@ -419,4 +419,3 @@ unsigned long get_wchan(struct task_struct *p)
return pc;
}
-EXPORT_SYMBOL(get_wchan);
diff --git a/trunk/arch/parisc/Kconfig b/trunk/arch/parisc/Kconfig
index 6b3c50964ca9..2fdf21989dc2 100644
--- a/trunk/arch/parisc/Kconfig
+++ b/trunk/arch/parisc/Kconfig
@@ -177,14 +177,10 @@ config ARCH_DISCONTIGMEM_DEFAULT
def_bool y
depends on ARCH_DISCONTIGMEM_ENABLE
+source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
source "mm/Kconfig"
-config PREEMPT
- bool
-# bool "Preemptible Kernel"
- default n
-
config COMPAT
def_bool y
depends on 64BIT
diff --git a/trunk/arch/parisc/configs/712_defconfig b/trunk/arch/parisc/configs/712_defconfig
index 3e013f55df64..41fd0696bbe7 100644
--- a/trunk/arch/parisc/configs/712_defconfig
+++ b/trunk/arch/parisc/configs/712_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:04:34 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 19:59:51 2006
#
CONFIG_PARISC=y
CONFIG_MMU=y
@@ -10,14 +10,11 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -32,17 +29,18 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
@@ -51,8 +49,10 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -65,6 +65,23 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
#
# Processor type and features
#
@@ -75,6 +92,10 @@ CONFIG_PA7100LC=y
# CONFIG_PA8X00 is not set
CONFIG_PA11=y
# CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
@@ -86,7 +107,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
# CONFIG_HPUX is not set
#
@@ -130,6 +151,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -165,7 +187,12 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
# CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NETFILTER_XTABLES is not set
#
# IP: Netfilter Configuration
@@ -182,64 +209,6 @@ CONFIG_IP_NF_TFTP=m
CONFIG_IP_NF_AMANDA=m
# CONFIG_IP_NF_PPTP is not set
CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-CONFIG_IP_NF_MATCH_SCTP=m
-# CONFIG_IP_NF_MATCH_DCCP is not set
-CONFIG_IP_NF_MATCH_COMMENT=m
-CONFIG_IP_NF_MATCH_CONNMARK=m
-CONFIG_IP_NF_MATCH_HASHLIMIT=m
-# CONFIG_IP_NF_MATCH_STRING is not set
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-# CONFIG_IP_NF_TARGET_NFQUEUE is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-# CONFIG_IP_NF_TARGET_TTL is not set
-CONFIG_IP_NF_TARGET_CONNMARK=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
#
# DCCP Configuration (EXPERIMENTAL)
@@ -250,6 +219,11 @@ CONFIG_IP_NF_ARP_MANGLE=m
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -263,8 +237,11 @@ CONFIG_LLC2=m
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -304,6 +281,7 @@ CONFIG_PARPORT=y
CONFIG_PARPORT_PC=m
# CONFIG_PARPORT_PC_FIFO is not set
# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_NOT_PC=y
CONFIG_PARPORT_GSC=y
# CONFIG_PARPORT_1284 is not set
@@ -314,7 +292,6 @@ CONFIG_PARPORT_GSC=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_PARIDE is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
@@ -325,14 +302,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=6144
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
CONFIG_ATA_OVER_ETH=m
#
@@ -376,6 +345,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_PPA is not set
# CONFIG_SCSI_IMM is not set
@@ -407,7 +377,6 @@ CONFIG_MD_RAID1=m
#
# IEEE 1394 (FireWire) support
#
-# CONFIG_IEEE1394 is not set
#
# I2O device support
@@ -471,6 +440,7 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
# CONFIG_SHAPER is not set
@@ -516,8 +486,8 @@ CONFIG_KEYBOARD_ATKBD_HP_KEYCODES=y
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_KEYBOARD_HIL_OLD=y
-# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_KEYBOARD_HIL=y
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_SERIAL=m
@@ -554,6 +524,7 @@ CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -598,12 +569,20 @@ CONFIG_MAX_RAW_DEVS=256
#
# TPM devices
#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
#
# Dallas's 1-wire bus
#
@@ -640,7 +619,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
# CONFIG_FB_MACMODES is not set
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
@@ -655,6 +633,7 @@ CONFIG_DUMMY_CONSOLE=y
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_STI_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
@@ -695,6 +674,8 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
@@ -723,6 +704,10 @@ CONFIG_SND_HARMONY=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
#
# USB Gadget Support
#
@@ -736,10 +721,9 @@ CONFIG_SND_HARMONY=y
#
# InfiniBand support
#
-# CONFIG_INFINIBAND is not set
#
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
@@ -765,6 +749,7 @@ CONFIG_XFS_EXPORT=y
# CONFIG_XFS_SECURITY is not set
# CONFIG_XFS_POSIX_ACL is not set
# CONFIG_XFS_RT is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
@@ -800,10 +785,10 @@ CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
# CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -821,7 +806,6 @@ CONFIG_RAMFS=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_SYSV_FS is not set
CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
#
# Network File Systems
@@ -917,18 +901,22 @@ CONFIG_OPROFILE=m
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
#
# Security options
diff --git a/trunk/arch/parisc/configs/a500_defconfig b/trunk/arch/parisc/configs/a500_defconfig
index 959ad3c4e372..f3b812f04592 100644
--- a/trunk/arch/parisc/configs/a500_defconfig
+++ b/trunk/arch/parisc/configs/a500_defconfig
@@ -1031,8 +1031,8 @@ CONFIG_NLS_CODEPAGE_850=m
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
diff --git a/trunk/arch/parisc/configs/b180_defconfig b/trunk/arch/parisc/configs/b180_defconfig
index 37e98241ce4b..35093612ad2c 100644
--- a/trunk/arch/parisc/configs/b180_defconfig
+++ b/trunk/arch/parisc/configs/b180_defconfig
@@ -939,10 +939,10 @@ CONFIG_MSDOS_PARTITION=y
#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=m
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
+CONFIG_NLS_CODEPAGE_850=m
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
@@ -962,8 +962,8 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -973,10 +973,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
#
# Kernel hacking
diff --git a/trunk/arch/parisc/configs/c3000_defconfig b/trunk/arch/parisc/configs/c3000_defconfig
index 0b1c8c1fa8a3..782906b644dd 100644
--- a/trunk/arch/parisc/configs/c3000_defconfig
+++ b/trunk/arch/parisc/configs/c3000_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:06:31 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 20:03:29 2006
#
CONFIG_PARISC=y
CONFIG_MMU=y
@@ -10,14 +10,11 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -32,28 +29,30 @@ CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -66,6 +65,23 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
#
# Processor type and features
#
@@ -78,6 +94,10 @@ CONFIG_PA20=y
CONFIG_PREFETCH=y
# CONFIG_64BIT is not set
# CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
@@ -89,7 +109,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_HPUX is not set
#
@@ -135,6 +155,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -175,7 +196,12 @@ CONFIG_INET6_TUNNEL=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_DEBUG=y
+
+#
+# Core Netfilter Configuration
+#
# CONFIG_NETFILTER_NETLINK is not set
+# CONFIG_NETFILTER_XTABLES is not set
#
# IP: Netfilter Configuration
@@ -192,87 +218,11 @@ CONFIG_IP_NF_TFTP=m
CONFIG_IP_NF_AMANDA=m
# CONFIG_IP_NF_PPTP is not set
CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-# CONFIG_IP_NF_MATCH_REALM is not set
-# CONFIG_IP_NF_MATCH_SCTP is not set
-# CONFIG_IP_NF_MATCH_DCCP is not set
-# CONFIG_IP_NF_MATCH_COMMENT is not set
-# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-# CONFIG_IP_NF_MATCH_STRING is not set
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-# CONFIG_IP_NF_TARGET_NFQUEUE is not set
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-# CONFIG_IP_NF_TARGET_TTL is not set
-# CONFIG_IP_NF_RAW is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
#
# IPv6: Netfilter Configuration (EXPERIMENTAL)
#
# CONFIG_IP6_NF_QUEUE is not set
-CONFIG_IP6_NF_IPTABLES=m
-# CONFIG_IP6_NF_MATCH_LIMIT is not set
-CONFIG_IP6_NF_MATCH_MAC=m
-CONFIG_IP6_NF_MATCH_RT=m
-# CONFIG_IP6_NF_MATCH_OPTS is not set
-# CONFIG_IP6_NF_MATCH_FRAG is not set
-# CONFIG_IP6_NF_MATCH_HL is not set
-# CONFIG_IP6_NF_MATCH_MULTIPORT is not set
-CONFIG_IP6_NF_MATCH_OWNER=m
-# CONFIG_IP6_NF_MATCH_MARK is not set
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-# CONFIG_IP6_NF_MATCH_AHESP is not set
-CONFIG_IP6_NF_MATCH_LENGTH=m
-# CONFIG_IP6_NF_MATCH_EUI64 is not set
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_LOG=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-# CONFIG_IP6_NF_TARGET_NFQUEUE is not set
-CONFIG_IP6_NF_MANGLE=m
-# CONFIG_IP6_NF_TARGET_MARK is not set
-# CONFIG_IP6_NF_TARGET_HL is not set
-# CONFIG_IP6_NF_RAW is not set
#
# DCCP Configuration (EXPERIMENTAL)
@@ -283,6 +233,11 @@ CONFIG_IP6_NF_MANGLE=m
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -295,8 +250,11 @@ CONFIG_IP6_NF_MANGLE=m
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -341,7 +299,6 @@ CONFIG_FW_LOADER=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
@@ -355,14 +312,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
# CONFIG_BLK_DEV_RAM is not set
CONFIG_BLK_DEV_RAM_COUNT=16
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
@@ -458,6 +407,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -466,7 +416,6 @@ CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
@@ -476,18 +425,18 @@ CONFIG_SCSI_SATA=y
CONFIG_SCSI_ATA_PIIX=m
# CONFIG_SCSI_SATA_MV is not set
# CONFIG_SCSI_SATA_NV is not set
-CONFIG_SCSI_SATA_PROMISE=m
+# CONFIG_SCSI_PDC_ADMA is not set
# CONFIG_SCSI_SATA_QSTOR is not set
+CONFIG_SCSI_SATA_PROMISE=m
# CONFIG_SCSI_SATA_SX4 is not set
CONFIG_SCSI_SATA_SIL=m
+# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
CONFIG_SCSI_SATA_VIA=m
# CONFIG_SCSI_SATA_VITESSE is not set
CONFIG_SCSI_SATA_INTEL_COMBINED=y
-# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
@@ -496,18 +445,11 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
@@ -633,6 +575,7 @@ CONFIG_E1000=m
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=m
@@ -668,6 +611,7 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
@@ -744,6 +688,7 @@ CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -753,7 +698,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
#
# Non-8250 serial port support
#
-# CONFIG_SERIAL_MUX is not set
# CONFIG_PDC_CONSOLE is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
@@ -788,12 +732,19 @@ CONFIG_MAX_RAW_DEVS=256
# TPM devices
#
# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
#
# Dallas's 1-wire bus
#
@@ -830,7 +781,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set
@@ -840,6 +790,7 @@ CONFIG_FB_SOFT_CURSOR=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
+# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
# CONFIG_FB_MATROX is not set
@@ -853,10 +804,7 @@ CONFIG_FB_STI=y
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
@@ -866,6 +814,7 @@ CONFIG_DUMMY_CONSOLE=y
CONFIG_DUMMY_CONSOLE_COLUMNS=160
CONFIG_DUMMY_CONSOLE_ROWS=64
CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_STI_CONSOLE=y
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
@@ -898,23 +847,27 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
#
# Generic devices
#
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_AC97_BUS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_VIRMIDI is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
-CONFIG_SND_AC97_CODEC=y
-CONFIG_SND_AC97_BUS=y
#
# PCI devices
#
+CONFIG_SND_AD1889=y
+# CONFIG_SND_AD1889_OPL3 is not set
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_ATIIXP is not set
# CONFIG_SND_ATIIXP_MODEM is not set
@@ -923,39 +876,38 @@ CONFIG_SND_AC97_BUS=y
# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
-# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_YMFPCI is not set
-CONFIG_SND_AD1889=y
-# CONFIG_SND_AD1889_OPL3 is not set
-# CONFIG_SND_CMIPCI is not set
# CONFIG_SND_ENS1370 is not set
# CONFIG_SND_ENS1371 is not set
# CONFIG_SND_ES1938 is not set
# CONFIG_SND_ES1968 is not set
-# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
# CONFIG_SND_VX222 is not set
-# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_YMFPCI is not set
#
# USB devices
@@ -998,12 +950,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# USB Device Class drivers
#
# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
CONFIG_USB_PRINTER=m
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1015,12 +970,15 @@ CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
CONFIG_USB_STORAGE_JUMPSHOT=y
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_LIBUSUAL is not set
#
# USB Input Devices
#
CONFIG_USB_HID=y
CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
CONFIG_USB_HIDDEV=y
# CONFIG_USB_AIPTEK is not set
@@ -1034,6 +992,7 @@ CONFIG_USB_HIDDEV=y
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
@@ -1108,7 +1067,7 @@ CONFIG_USB_LEGOTOWER=m
# CONFIG_INFINIBAND is not set
#
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
@@ -1130,6 +1089,7 @@ CONFIG_XFS_EXPORT=y
# CONFIG_XFS_SECURITY is not set
# CONFIG_XFS_POSIX_ACL is not set
# CONFIG_XFS_RT is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
@@ -1164,10 +1124,10 @@ CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
# CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -1225,10 +1185,10 @@ CONFIG_MSDOS_PARTITION=y
#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=m
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
+CONFIG_NLS_CODEPAGE_850=m
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
@@ -1248,8 +1208,8 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -1259,10 +1219,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
#
# Profiling support
@@ -1274,18 +1234,22 @@ CONFIG_OPROFILE=m
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
#
# Security options
diff --git a/trunk/arch/parisc/defconfig b/trunk/arch/parisc/defconfig
index f38a4620d24f..59f7bc38e72e 100644
--- a/trunk/arch/parisc/defconfig
+++ b/trunk/arch/parisc/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc5-pa1
-# Fri Oct 21 23:01:33 2005
+# Linux kernel version: 2.6.16-pa6
+# Sun Mar 26 19:50:07 2006
#
CONFIG_PARISC=y
CONFIG_MMU=y
@@ -15,7 +15,6 @@ CONFIG_GENERIC_IRQ_PROBE=y
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -30,17 +29,18 @@ CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
@@ -49,14 +49,33 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
#
# CONFIG_MODULES is not set
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
#
# Processor type and features
#
@@ -67,6 +86,10 @@ CONFIG_PA7000=y
# CONFIG_PA8X00 is not set
CONFIG_PA11=y
# CONFIG_SMP is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
@@ -78,7 +101,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_PREEMPT is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
# CONFIG_HPUX is not set
#
@@ -132,6 +155,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -174,6 +198,11 @@ CONFIG_IPV6=y
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -186,8 +215,11 @@ CONFIG_IPV6=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -228,6 +260,7 @@ CONFIG_PARPORT_PC=y
# CONFIG_PARPORT_SERIAL is not set
# CONFIG_PARPORT_PC_FIFO is not set
# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_NOT_PC=y
CONFIG_PARPORT_GSC=y
# CONFIG_PARPORT_1284 is not set
@@ -254,14 +287,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
@@ -305,6 +330,7 @@ CONFIG_SCSI_SPI_ATTRS=y
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -331,7 +357,7 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+CONFIG_SCSI_SYM53C8XX_MMIO=y
# CONFIG_SCSI_IPR is not set
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_NCR53C8XX_DEFAULT_TAGS=8
@@ -340,13 +366,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
# CONFIG_SCSI_NCR53C8XX_PROFILE is not set
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_SIM710 is not set
# CONFIG_SCSI_DC395x is not set
@@ -471,6 +491,7 @@ CONFIG_ACENIC=y
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
@@ -562,13 +583,13 @@ CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_KEYBOARD_HIL_OLD=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_KEYBOARD_HIL=y
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_MOUSE_HIL is not set
+CONFIG_MOUSE_HIL=y
CONFIG_INPUT_JOYSTICK=y
# CONFIG_JOYSTICK_ANALOG is not set
# CONFIG_JOYSTICK_A3D is not set
@@ -628,6 +649,7 @@ CONFIG_HW_CONSOLE=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=13
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
@@ -675,12 +697,19 @@ CONFIG_GEN_RTC=y
# TPM devices
#
# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
#
# Dallas's 1-wire bus
#
@@ -691,6 +720,7 @@ CONFIG_GEN_RTC=y
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_F71805F is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
@@ -718,7 +748,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set
@@ -728,6 +757,7 @@ CONFIG_FB_SOFT_CURSOR=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
CONFIG_FB_STI=y
+# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
# CONFIG_FB_MATROX is not set
@@ -741,9 +771,7 @@ CONFIG_FB_STI=y
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
@@ -753,15 +781,28 @@ CONFIG_DUMMY_CONSOLE=y
CONFIG_DUMMY_CONSOLE_COLUMNS=160
CONFIG_DUMMY_CONSOLE_ROWS=64
CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_STI_CONSOLE=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
#
# Logo configuration
#
-# CONFIG_LOGO is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PARISC_CLUT224=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -781,23 +822,27 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
#
# Generic devices
#
+CONFIG_SND_AC97_CODEC=y
+CONFIG_SND_AC97_BUS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_VIRMIDI is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
-CONFIG_SND_AC97_CODEC=y
-CONFIG_SND_AC97_BUS=y
#
# PCI devices
#
+CONFIG_SND_AD1889=y
+# CONFIG_SND_AD1889_OPL3 is not set
# CONFIG_SND_ALI5451 is not set
# CONFIG_SND_ATIIXP is not set
# CONFIG_SND_ATIIXP_MODEM is not set
@@ -806,39 +851,38 @@ CONFIG_SND_AC97_BUS=y
# CONFIG_SND_AU8830 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
-# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_YMFPCI is not set
-CONFIG_SND_AD1889=y
-# CONFIG_SND_AD1889_OPL3 is not set
-# CONFIG_SND_CMIPCI is not set
# CONFIG_SND_ENS1370 is not set
# CONFIG_SND_ENS1371 is not set
# CONFIG_SND_ES1938 is not set
# CONFIG_SND_ES1968 is not set
-# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
# CONFIG_SND_VX222 is not set
-# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_YMFPCI is not set
#
# USB devices
@@ -888,14 +932,18 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# USB Device Class drivers
#
# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
-# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
#
# CONFIG_USB_STORAGE is not set
+# CONFIG_USB_LIBUSUAL is not set
#
# USB Input Devices
@@ -918,6 +966,7 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
@@ -994,7 +1043,7 @@ CONFIG_USB_MON=y
# CONFIG_INFINIBAND is not set
#
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
@@ -1011,6 +1060,7 @@ CONFIG_JBD=y
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
@@ -1045,6 +1095,7 @@ CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
# CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -1151,18 +1202,22 @@ CONFIG_OPROFILE=y
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_IOREMAP is not set
# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_RODATA=y
#
# Security options
diff --git a/trunk/arch/parisc/kernel/cache.c b/trunk/arch/parisc/kernel/cache.c
index d8a4ca021aac..360b7391cb8c 100644
--- a/trunk/arch/parisc/kernel/cache.c
+++ b/trunk/arch/parisc/kernel/cache.c
@@ -89,7 +89,7 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
- flush_kernel_dcache_page(page_address(page));
+ flush_kernel_dcache_page(page);
clear_bit(PG_dcache_dirty, &page->flags);
}
}
@@ -278,7 +278,7 @@ void flush_dcache_page(struct page *page)
return;
}
- flush_kernel_dcache_page(page_address(page));
+ flush_kernel_dcache_page(page);
if (!mapping)
return;
@@ -317,7 +317,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
-EXPORT_SYMBOL(flush_kernel_dcache_page);
+EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
diff --git a/trunk/arch/parisc/kernel/entry.S b/trunk/arch/parisc/kernel/entry.S
index 9af4b22a6d77..7c95d7663c29 100644
--- a/trunk/arch/parisc/kernel/entry.S
+++ b/trunk/arch/parisc/kernel/entry.S
@@ -563,10 +563,10 @@
extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
- /* Get rid of prot bits and convert to page addr for iitlbt */
+ /* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
depd %r0,63,PAGE_SHIFT,\pte
- extrd,u \pte,56,32,\pte
+ extrd,s \pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
.endm
/* Identical macro to make_insert_tlb above, except it
@@ -584,7 +584,7 @@
/* Get rid of prot bits and convert to page addr for iitlba */
- depi 0,31,12,\pte
+ depi 0,31,PAGE_SHIFT,\pte
extru \pte,24,25,\pte
.endm
@@ -1014,14 +1014,21 @@ intr_restore:
nop
nop
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt intr_restore
+#endif /* !CONFIG_PREEMPT */
+
.import schedule,code
intr_do_resched:
- /* Only do reschedule if we are returning to user space */
+ /* Only call schedule on return to userspace. If we're returning
+ * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+ * we jump back to intr_restore.
+ */
LDREG PT_IASQ0(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ CMPIB= 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ CMPIB= 0, %r20, intr_do_preempt
nop
#ifdef CONFIG_64BIT
@@ -1037,6 +1044,32 @@ intr_do_resched:
#endif
ldo R%intr_check_sig(%r2), %r2
+ /* preempt the current task on returning to kernel
+ * mode from an interrupt, iff need_resched is set,
+ * and preempt_count is 0. otherwise, we continue on
+ * our merry way back to the current running task.
+ */
+#ifdef CONFIG_PREEMPT
+ .import preempt_schedule_irq,code
+intr_do_preempt:
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+
+ /* current_thread_info()->preempt_count */
+ mfctl %cr30, %r1
+ LDREG TI_PRE_COUNT(%r1), %r19
+ CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
+ nop /* prev insn branched backwards */
+
+ /* check if we interrupted a critical path */
+ LDREG PT_PSW(%r16), %r20
+ bb,<,n %r20, 31 - PSW_SM_I, intr_restore
+ nop
+
+ BL preempt_schedule_irq, %r2
+ nop
+
+ b intr_restore /* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
.import do_signal,code
intr_do_signal:
diff --git a/trunk/arch/parisc/kernel/pacache.S b/trunk/arch/parisc/kernel/pacache.S
index 9534ee17b9be..7a4f07e8d3c3 100644
--- a/trunk/arch/parisc/kernel/pacache.S
+++ b/trunk/arch/parisc/kernel/pacache.S
@@ -621,9 +621,9 @@ __clear_user_page_asm:
.procend
- .export flush_kernel_dcache_page
+ .export flush_kernel_dcache_page_asm
-flush_kernel_dcache_page:
+flush_kernel_dcache_page_asm:
.proc
.callinfo NO_CALLS
.entry
diff --git a/trunk/arch/parisc/kernel/parisc_ksyms.c b/trunk/arch/parisc/kernel/parisc_ksyms.c
index 1d00c365f2b1..47ca5c0a323b 100644
--- a/trunk/arch/parisc/kernel/parisc_ksyms.c
+++ b/trunk/arch/parisc/kernel/parisc_ksyms.c
@@ -30,22 +30,7 @@
#include
#include
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strpbrk);
#include
@@ -82,16 +67,12 @@ EXPORT_SYMBOL($global$);
#endif
#include
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
#include
-EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_lseek);
-EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_write);
#include
diff --git a/trunk/arch/parisc/kernel/pdc_chassis.c b/trunk/arch/parisc/kernel/pdc_chassis.c
index 0cea6958f427..a45e2e2ffd9f 100644
--- a/trunk/arch/parisc/kernel/pdc_chassis.c
+++ b/trunk/arch/parisc/kernel/pdc_chassis.c
@@ -5,9 +5,8 @@
* Copyright (C) 2002-2004 Thibaut VARENE
*
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/trunk/arch/parisc/kernel/perf.c b/trunk/arch/parisc/kernel/perf.c
index 53f861c82f93..ac8ee205c351 100644
--- a/trunk/arch/parisc/kernel/perf.c
+++ b/trunk/arch/parisc/kernel/perf.c
@@ -805,7 +805,7 @@ static int perf_write_image(uint64_t *memaddr)
return -1;
}
- runway = ioremap(cpu_device->hpa.start, 4096);
+ runway = ioremap_nocache(cpu_device->hpa.start, 4096);
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
diff --git a/trunk/arch/parisc/kernel/syscall_table.S b/trunk/arch/parisc/kernel/syscall_table.S
index 89b6c56ea0a8..bbeeb614cfab 100644
--- a/trunk/arch/parisc/kernel/syscall_table.S
+++ b/trunk/arch/parisc/kernel/syscall_table.S
@@ -287,7 +287,7 @@
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
ENTRY_COMP(setsockopt)
- ENTRY_SAME(getsockopt)
+ ENTRY_COMP(getsockopt)
ENTRY_COMP(sendmsg)
ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
diff --git a/trunk/arch/parisc/lib/iomap.c b/trunk/arch/parisc/lib/iomap.c
index 01bec8fcbd0d..f4a811690ab3 100644
--- a/trunk/arch/parisc/lib/iomap.c
+++ b/trunk/arch/parisc/lib/iomap.c
@@ -263,11 +263,7 @@ static const struct iomap_ops iomem_ops = {
const struct iomap_ops *iomap_ops[8] = {
[0] = &ioport_ops,
-#ifdef CONFIG_DEBUG_IOREMAP
- [6] = &iomem_ops,
-#else
[7] = &iomem_ops
-#endif
};
diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c
index 852eda3953dc..3796be67cd53 100644
--- a/trunk/arch/parisc/mm/init.c
+++ b/trunk/arch/parisc/mm/init.c
@@ -1013,9 +1013,9 @@ void flush_tlb_all(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
-#if 0
- if (start < end)
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ if (start >= end)
+ return;
+ printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
@@ -1023,6 +1023,5 @@ void free_initrd_mem(unsigned long start, unsigned long end)
num_physpages++;
totalram_pages++;
}
-#endif
}
#endif
diff --git a/trunk/arch/parisc/mm/ioremap.c b/trunk/arch/parisc/mm/ioremap.c
index edd9a9559cba..0db12818d7bc 100644
--- a/trunk/arch/parisc/mm/ioremap.c
+++ b/trunk/arch/parisc/mm/ioremap.c
@@ -72,7 +72,6 @@ remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
return 0;
}
-#if USE_HPPA_IOREMAP
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
unsigned long size, unsigned long flags)
@@ -114,31 +113,6 @@ remap_area_pages(unsigned long address, unsigned long phys_addr,
return error;
}
-#endif /* USE_HPPA_IOREMAP */
-
-#ifdef CONFIG_DEBUG_IOREMAP
-static unsigned long last = 0;
-
-void gsc_bad_addr(unsigned long addr)
-{
- if (time_after(jiffies, last + HZ*10)) {
- printk("gsc_foo() called with bad address 0x%lx\n", addr);
- dump_stack();
- last = jiffies;
- }
-}
-EXPORT_SYMBOL(gsc_bad_addr);
-
-void __raw_bad_addr(const volatile void __iomem *addr)
-{
- if (time_after(jiffies, last + HZ*10)) {
- printk("__raw_foo() called with bad address 0x%p\n", addr);
- dump_stack();
- last = jiffies;
- }
-}
-EXPORT_SYMBOL(__raw_bad_addr);
-#endif
/*
* Generic mapping function (not visible outside):
@@ -154,26 +128,19 @@ EXPORT_SYMBOL(__raw_bad_addr);
*/
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
-#if !(USE_HPPA_IOREMAP)
+ void *addr;
+ struct vm_struct *area;
+ unsigned long offset, last_addr;
+#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
/* Support EISA addresses */
- if ((phys_addr >= 0x00080000 && end < 0x000fffff)
- || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
- phys_addr |= 0xfc000000;
+ if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
+ (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+ phys_addr |= F_EXTEND(0xfc000000);
}
-
-#ifdef CONFIG_DEBUG_IOREMAP
- return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
-#else
- return (void __iomem *)phys_addr;
#endif
-#else
- void *addr;
- struct vm_struct *area;
- unsigned long offset, last_addr;
-
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
@@ -217,15 +184,12 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
return (void __iomem *) (offset + (char *)addr);
-#endif
}
+EXPORT_SYMBOL(__ioremap);
void iounmap(void __iomem *addr)
{
-#if !(USE_HPPA_IOREMAP)
- return;
-#else
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
-#endif
}
+EXPORT_SYMBOL(iounmap);
diff --git a/trunk/arch/powerpc/kernel/crash_dump.c b/trunk/arch/powerpc/kernel/crash_dump.c
index 211d72653ea6..764d07329716 100644
--- a/trunk/arch/powerpc/kernel/crash_dump.c
+++ b/trunk/arch/powerpc/kernel/crash_dump.c
@@ -61,7 +61,7 @@ static int __init parse_elfcorehdr(char *p)
if (p)
elfcorehdr_addr = memparse(p, &p);
- return 0;
+ return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif
@@ -71,7 +71,7 @@ static int __init parse_savemaxmem(char *p)
if (p)
saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
- return 0;
+ return 1;
}
__setup("savemaxmem=", parse_savemaxmem);
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 706090c99f47..2dd47d2dd998 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -834,7 +834,6 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
static int kstack_depth_to_print = 64;
diff --git a/trunk/arch/powerpc/kernel/vdso32/sigtramp.S b/trunk/arch/powerpc/kernel/vdso32/sigtramp.S
index e04642781917..0c6a37b29dde 100644
--- a/trunk/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/trunk/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -261,7 +261,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt32)
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
- .string "zR" /* NUL-terminated augmentation string */
+ .string "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 67 /* Return address register column, ap */
diff --git a/trunk/arch/powerpc/kernel/vdso64/sigtramp.S b/trunk/arch/powerpc/kernel/vdso64/sigtramp.S
index 31b604ab56de..7479edb101b8 100644
--- a/trunk/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/trunk/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -263,7 +263,7 @@ V_FUNCTION_END(__kernel_sigtramp_rt64)
.Lcie_start:
.long 0 /* CIE ID */
.byte 1 /* Version number */
- .string "zR" /* NUL-terminated augmentation string */
+ .string "zRS" /* NUL-terminated augmentation string */
.uleb128 4 /* Code alignment factor */
.sleb128 -8 /* Data alignment factor */
.byte 67 /* Return address register column, ap */
diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c
index 2b8841f85534..343120c9223d 100644
--- a/trunk/arch/s390/kernel/smp.c
+++ b/trunk/arch/s390/kernel/smp.c
@@ -801,7 +801,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
print_cpu_info(&S390_lowcore.cpu_data);
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
lowcore_ptr[i] = (struct _lowcore *)
__get_free_pages(GFP_KERNEL|GFP_DMA,
sizeof(void*) == 8 ? 1 : 0);
@@ -831,7 +831,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#endif
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
- for_each_cpu(cpu)
+ for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
}
@@ -868,7 +868,7 @@ static int __init topology_init(void)
int cpu;
int ret;
- for_each_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/trunk/arch/sh/kernel/cpu/init.c b/trunk/arch/sh/kernel/cpu/init.c
index cf94e8ef17c5..868e68b28880 100644
--- a/trunk/arch/sh/kernel/cpu/init.c
+++ b/trunk/arch/sh/kernel/cpu/init.c
@@ -30,7 +30,7 @@ static int x##_disabled __initdata = 0; \
static int __init x##_setup(char *opts) \
{ \
x##_disabled = 1; \
- return 0; \
+ return 1; \
} \
__setup("no" __stringify(x), x##_setup);
diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c
index 7ee4ca203616..bb229ef030f3 100644
--- a/trunk/arch/sh/kernel/setup.c
+++ b/trunk/arch/sh/kernel/setup.c
@@ -401,7 +401,7 @@ static int __init topology_init(void)
{
int cpu_id;
- for_each_cpu(cpu_id)
+ for_each_possible_cpu(cpu_id)
register_cpu(&cpu[cpu_id], cpu_id, NULL);
return 0;
diff --git a/trunk/arch/um/Kconfig b/trunk/arch/um/Kconfig
index 5982fe2753e0..05fbb20636cb 100644
--- a/trunk/arch/um/Kconfig
+++ b/trunk/arch/um/Kconfig
@@ -22,6 +22,9 @@ config SBUS
config PCI
bool
+config PCMCIA
+ bool
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/trunk/arch/um/Makefile b/trunk/arch/um/Makefile
index 8d14c7a831be..24790bed2054 100644
--- a/trunk/arch/um/Makefile
+++ b/trunk/arch/um/Makefile
@@ -20,7 +20,7 @@ core-y += $(ARCH_DIR)/kernel/ \
# Have to precede the include because the included Makefiles reference them.
SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \
- module.h vm-flags.h elf.h ldt.h
+ module.h vm-flags.h elf.h host_ldt.h
SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header))
# XXX: The "os" symlink is only used by arch/um/include/os.h, which includes
@@ -129,7 +129,7 @@ CPPFLAGS_vmlinux.lds = -U$(SUBARCH) \
-DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
-DELF_FORMAT="$(ELF_FORMAT)" $(CPP_MODE-y) \
-DKERNEL_STACK_SIZE=$(STACK_SIZE) \
- -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap_fin.o
+ -DUNMAP_PATH=arch/um/sys-$(SUBARCH)/unmap.o
#The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
@@ -150,8 +150,7 @@ CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \
$(ARCH_DIR)/include/user_constants.h \
$(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch
-MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \
- $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os
+MRPROPER_FILES += $(ARCH_SYMLINKS)
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/trunk/arch/um/Makefile-x86_64 b/trunk/arch/um/Makefile-x86_64
index 38df311e75dc..dfd88b652fbe 100644
--- a/trunk/arch/um/Makefile-x86_64
+++ b/trunk/arch/um/Makefile-x86_64
@@ -1,7 +1,7 @@
# Copyright 2003 - 2004 Pathscale, Inc
# Released under the GPL
-libs-y += arch/um/sys-x86_64/
+core-y += arch/um/sys-x86_64/
START := 0x60000000
#We #undef __x86_64__ for kernelspace, not for userspace where
diff --git a/trunk/arch/um/drivers/daemon_kern.c b/trunk/arch/um/drivers/daemon_kern.c
index a61b7b46bc02..53d09ed78b42 100644
--- a/trunk/arch/um/drivers/daemon_kern.c
+++ b/trunk/arch/um/drivers/daemon_kern.c
@@ -95,18 +95,7 @@ static struct transport daemon_transport = {
static int register_daemon(void)
{
register_transport(&daemon_transport);
- return(1);
+ return 0;
}
__initcall(register_daemon);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/drivers/harddog_kern.c b/trunk/arch/um/drivers/harddog_kern.c
index 49acb2badf32..d18a974735e6 100644
--- a/trunk/arch/um/drivers/harddog_kern.c
+++ b/trunk/arch/um/drivers/harddog_kern.c
@@ -104,7 +104,7 @@ static int harddog_release(struct inode *inode, struct file *file)
extern int ping_watchdog(int fd);
-static ssize_t harddog_write(struct file *file, const char *data, size_t len,
+static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
loff_t *ppos)
{
/*
@@ -118,6 +118,7 @@ static ssize_t harddog_write(struct file *file, const char *data, size_t len,
static int harddog_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ void __user *argp= (void __user *)arg;
static struct watchdog_info ident = {
WDIOC_SETTIMEOUT,
0,
@@ -127,13 +128,12 @@ static int harddog_ioctl(struct inode *inode, struct file *file,
default:
return -ENOTTY;
case WDIOC_GETSUPPORT:
- if(copy_to_user((struct harddog_info *)arg, &ident,
- sizeof(ident)))
+ if(copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- return put_user(0,(int *)arg);
+ return put_user(0,(int __user *)argp);
case WDIOC_KEEPALIVE:
return(ping_watchdog(harddog_out_fd));
}
diff --git a/trunk/arch/um/drivers/hostaudio_kern.c b/trunk/arch/um/drivers/hostaudio_kern.c
index 59602b81b240..37232f908cd7 100644
--- a/trunk/arch/um/drivers/hostaudio_kern.c
+++ b/trunk/arch/um/drivers/hostaudio_kern.c
@@ -67,8 +67,8 @@ MODULE_PARM_DESC(mixer, MIXER_HELP);
/* /dev/dsp file operations */
-static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
- loff_t *ppos)
+static ssize_t hostaudio_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
void *kbuf;
@@ -94,7 +94,7 @@ static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
return(err);
}
-static ssize_t hostaudio_write(struct file *file, const char *buffer,
+static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hostaudio_state *state = file->private_data;
@@ -152,7 +152,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
- if(get_user(data, (int *) arg))
+ if(get_user(data, (int __user *) arg))
return(-EFAULT);
break;
default:
@@ -168,7 +168,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_CHANNELS:
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
- if(put_user(data, (int *) arg))
+ if(put_user(data, (int __user *) arg))
return(-EFAULT);
break;
default:
diff --git a/trunk/arch/um/drivers/mcast_kern.c b/trunk/arch/um/drivers/mcast_kern.c
index c9b078fba03e..3a7af18cf944 100644
--- a/trunk/arch/um/drivers/mcast_kern.c
+++ b/trunk/arch/um/drivers/mcast_kern.c
@@ -124,18 +124,7 @@ static struct transport mcast_transport = {
static int register_mcast(void)
{
register_transport(&mcast_transport);
- return(1);
+ return 0;
}
__initcall(register_mcast);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c
index 1488816588ea..28e3760e8b98 100644
--- a/trunk/arch/um/drivers/mconsole_kern.c
+++ b/trunk/arch/um/drivers/mconsole_kern.c
@@ -20,6 +20,8 @@
#include "linux/namei.h"
#include "linux/proc_fs.h"
#include "linux/syscalls.h"
+#include "linux/list.h"
+#include "linux/mm.h"
#include "linux/console.h"
#include "asm/irq.h"
#include "asm/uaccess.h"
@@ -347,6 +349,142 @@ static struct mc_device *mconsole_find_dev(char *name)
return(NULL);
}
+#define UNPLUGGED_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long))
+
+struct unplugged_pages {
+ struct list_head list;
+ void *pages[UNPLUGGED_PER_PAGE];
+};
+
+static unsigned long long unplugged_pages_count = 0;
+static struct list_head unplugged_pages = LIST_HEAD_INIT(unplugged_pages);
+static int unplug_index = UNPLUGGED_PER_PAGE;
+
+static int mem_config(char *str)
+{
+ unsigned long long diff;
+ int err = -EINVAL, i, add;
+ char *ret;
+
+ if(str[0] != '=')
+ goto out;
+
+ str++;
+ if(str[0] == '-')
+ add = 0;
+ else if(str[0] == '+'){
+ add = 1;
+ }
+ else goto out;
+
+ str++;
+ diff = memparse(str, &ret);
+ if(*ret != '\0')
+ goto out;
+
+ diff /= PAGE_SIZE;
+
+ for(i = 0; i < diff; i++){
+ struct unplugged_pages *unplugged;
+ void *addr;
+
+ if(add){
+ if(list_empty(&unplugged_pages))
+ break;
+
+ unplugged = list_entry(unplugged_pages.next,
+ struct unplugged_pages, list);
+ if(unplug_index > 0)
+ addr = unplugged->pages[--unplug_index];
+ else {
+ list_del(&unplugged->list);
+ addr = unplugged;
+ unplug_index = UNPLUGGED_PER_PAGE;
+ }
+
+ free_page((unsigned long) addr);
+ unplugged_pages_count--;
+ }
+ else {
+ struct page *page;
+
+ page = alloc_page(GFP_ATOMIC);
+ if(page == NULL)
+ break;
+
+ unplugged = page_address(page);
+ if(unplug_index == UNPLUGGED_PER_PAGE){
+ INIT_LIST_HEAD(&unplugged->list);
+ list_add(&unplugged->list, &unplugged_pages);
+ unplug_index = 0;
+ }
+ else {
+ struct list_head *entry = unplugged_pages.next;
+ addr = unplugged;
+
+ unplugged = list_entry(entry,
+ struct unplugged_pages,
+ list);
+ unplugged->pages[unplug_index++] = addr;
+ err = os_drop_memory(addr, PAGE_SIZE);
+ if(err)
+ printk("Failed to release memory - "
+ "errno = %d\n", err);
+ }
+
+ unplugged_pages_count++;
+ }
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+static int mem_get_config(char *name, char *str, int size, char **error_out)
+{
+ char buf[sizeof("18446744073709551615")];
+ int len = 0;
+
+ sprintf(buf, "%ld", uml_physmem);
+ CONFIG_CHUNK(str, size, len, buf, 1);
+
+ return len;
+}
+
+static int mem_id(char **str, int *start_out, int *end_out)
+{
+ *start_out = 0;
+ *end_out = 0;
+
+ return 0;
+}
+
+static int mem_remove(int n)
+{
+ return -EBUSY;
+}
+
+static struct mc_device mem_mc = {
+ .name = "mem",
+ .config = mem_config,
+ .get_config = mem_get_config,
+ .id = mem_id,
+ .remove = mem_remove,
+};
+
+static int mem_mc_init(void)
+{
+ if(can_drop_memory())
+ mconsole_register_dev(&mem_mc);
+ else printk("Can't release memory to the host - memory hotplug won't "
+ "be supported\n");
+ return 0;
+}
+
+__initcall(mem_mc_init);
+
#define CONFIG_BUF_SIZE 64
static void mconsole_get_config(int (*get_config)(char *, char *, int,
@@ -478,7 +616,7 @@ static void console_write(struct console *console, const char *string,
return;
while(1){
- n = min(len, ARRAY_SIZE(console_buf) - console_index);
+ n = min((size_t)len, ARRAY_SIZE(console_buf) - console_index);
strncpy(&console_buf[console_index], string, n);
console_index += n;
string += n;
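
The UNPLUGGED_PER_PAGE bookkeeping added above stores the addresses of unplugged pages inside one of the unplugged pages itself, so tracking them costs no extra memory. Below is a minimal host-side sketch of the same arithmetic, assuming a 4096-byte page; the structure only mirrors the patch and is not part of it:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

struct list_head { struct list_head *next, *prev; };

/* One tracking page holds a list node plus as many page addresses as fit
 * in the remainder of the page - the same layout as struct unplugged_pages. */
struct unplugged_pages_sketch {
	struct list_head list;
	void *pages[(PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long)];
};

int main(void)
{
	size_t per_page = (PAGE_SIZE - sizeof(struct list_head)) / sizeof(unsigned long);

	/* 32-bit host: (4096 - 8) / 4 = 1022 entries per tracking page;
	 * 64-bit host: (4096 - 16) / 8 = 510. */
	printf("page addresses per tracking page: %zu\n", per_page);
	printf("sizeof(struct unplugged_pages_sketch) = %zu, fits in a page: %s\n",
	       sizeof(struct unplugged_pages_sketch),
	       sizeof(struct unplugged_pages_sketch) <= PAGE_SIZE ? "yes" : "no");
	return 0;
}

For a "mem=-64M" request sent over the mconsole, the config handler above converts the size to pages, allocates them with alloc_page(), records their addresses in these tracking pages, and releases the backing store to the host with os_drop_memory().
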
diff --git a/trunk/arch/um/drivers/pcap_kern.c b/trunk/arch/um/drivers/pcap_kern.c
index 07c80f2156ef..466ff2c2f918 100644
--- a/trunk/arch/um/drivers/pcap_kern.c
+++ b/trunk/arch/um/drivers/pcap_kern.c
@@ -106,18 +106,7 @@ static struct transport pcap_transport = {
static int register_pcap(void)
{
register_transport(&pcap_transport);
- return(1);
+ return 0;
}
__initcall(register_pcap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/drivers/slip_kern.c b/trunk/arch/um/drivers/slip_kern.c
index a62f5ef445cf..163ee0d5f75e 100644
--- a/trunk/arch/um/drivers/slip_kern.c
+++ b/trunk/arch/um/drivers/slip_kern.c
@@ -93,18 +93,7 @@ static struct transport slip_transport = {
static int register_slip(void)
{
register_transport(&slip_transport);
- return(1);
+ return 0;
}
__initcall(register_slip);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/drivers/slirp_kern.c b/trunk/arch/um/drivers/slirp_kern.c
index 33d7982be5d3..95e50c943e14 100644
--- a/trunk/arch/um/drivers/slirp_kern.c
+++ b/trunk/arch/um/drivers/slirp_kern.c
@@ -77,7 +77,7 @@ static int slirp_setup(char *str, char **mac_out, void *data)
int i=0;
*init = ((struct slirp_init)
- { argw : { { "slirp", NULL } } });
+ { .argw = { { "slirp", NULL } } });
str = split_if_spec(str, mac_out, NULL);
@@ -116,18 +116,7 @@ static struct transport slirp_transport = {
static int register_slirp(void)
{
register_transport(&slirp_transport);
- return(1);
+ return 0;
}
__initcall(register_slirp);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/drivers/ubd_kern.c b/trunk/arch/um/drivers/ubd_kern.c
index 0336575d2448..0897852b09a3 100644
--- a/trunk/arch/um/drivers/ubd_kern.c
+++ b/trunk/arch/um/drivers/ubd_kern.c
@@ -891,7 +891,7 @@ int ubd_driver_init(void){
SA_INTERRUPT, "ubd", ubd_dev);
if(err != 0)
printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
- return(err);
+ return 0;
}
device_initcall(ubd_driver_init);
diff --git a/trunk/arch/um/include/kern_util.h b/trunk/arch/um/include/kern_util.h
index 07176d92e1c9..42557130a408 100644
--- a/trunk/arch/um/include/kern_util.h
+++ b/trunk/arch/um/include/kern_util.h
@@ -116,7 +116,11 @@ extern void *get_current(void);
extern struct task_struct *get_task(int pid, int require);
extern void machine_halt(void);
extern int is_syscall(unsigned long addr);
-extern void arch_switch(void);
+
+extern void arch_switch_to_tt(struct task_struct *from, struct task_struct *to);
+
+extern void arch_switch_to_skas(struct task_struct *from, struct task_struct *to);
+
extern void free_irq(unsigned int, void *);
extern int cpu(void);
diff --git a/trunk/arch/um/include/line.h b/trunk/arch/um/include/line.h
index 6f4d680dc1d4..6ac0f8252e21 100644
--- a/trunk/arch/um/include/line.h
+++ b/trunk/arch/um/include/line.h
@@ -58,23 +58,17 @@ struct line {
};
#define LINE_INIT(str, d) \
- { init_str : str, \
- init_pri : INIT_STATIC, \
- valid : 1, \
- throttled : 0, \
- lock : SPIN_LOCK_UNLOCKED, \
- buffer : NULL, \
- head : NULL, \
- tail : NULL, \
- sigio : 0, \
- driver : d, \
- have_irq : 0 }
+ { .init_str = str, \
+ .init_pri = INIT_STATIC, \
+ .valid = 1, \
+ .lock = SPIN_LOCK_UNLOCKED, \
+ .driver = d }
struct lines {
int num;
};
-#define LINES_INIT(n) { num : n }
+#define LINES_INIT(n) { .num = n }
extern void line_close(struct tty_struct *tty, struct file * filp);
extern int line_open(struct line *lines, struct tty_struct *tty);
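
The LINE_INIT()/LINES_INIT() rewrite above is part of a wider conversion in this patch from the old GNU "field: value" initializer syntax to standard C99 designated initializers; members that are left out are zero-initialized, which is why the explicit zero fields could be dropped. A small standalone illustration (the struct is invented for the example, not taken from the patch):

#include <stdio.h>

struct line_cfg {
	const char *init_str;
	int valid;
	int sigio;		/* intentionally not initialized below */
	void *driver;
};

/* Old GNU extension (still accepted by gcc, not standard C):
 *	{ init_str : str, valid : 1, driver : d }
 * C99 designated initializers, as used after the conversion: */
#define LINE_CFG_INIT(str, d) { .init_str = str, .valid = 1, .driver = d }

int main(void)
{
	struct line_cfg c = LINE_CFG_INIT("con", NULL);

	/* Members without a designator are implicitly zero. */
	printf("%s valid=%d sigio=%d\n", c.init_str, c.valid, c.sigio);
	return 0;
}
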
diff --git a/trunk/arch/um/include/mem_user.h b/trunk/arch/um/include/mem_user.h
index a1064c5823bf..a54514d2cc3a 100644
--- a/trunk/arch/um/include/mem_user.h
+++ b/trunk/arch/um/include/mem_user.h
@@ -49,7 +49,6 @@ extern int iomem_size;
extern unsigned long host_task_size;
extern unsigned long task_size;
-extern void check_devanon(void);
extern int init_mem_user(void);
extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
diff --git a/trunk/arch/um/include/os.h b/trunk/arch/um/include/os.h
index d3d1bc6074ef..f88856c28a66 100644
--- a/trunk/arch/um/include/os.h
+++ b/trunk/arch/um/include/os.h
@@ -13,6 +13,7 @@
#include "kern_util.h"
#include "skas/mm_id.h"
#include "irq_user.h"
+#include "sysdep/tls.h"
#define OS_TYPE_FILE 1
#define OS_TYPE_DIR 2
@@ -172,6 +173,7 @@ extern int os_fchange_dir(int fd);
extern void os_early_checks(void);
extern int can_do_skas(void);
extern void os_check_bugs(void);
+extern void check_host_supports_tls(int *supports_tls, int *tls_min);
/* Make sure they are clear when running in TT mode. Required by
* SEGV_MAYBE_FIXABLE */
@@ -205,6 +207,8 @@ extern int os_map_memory(void *virt, int fd, unsigned long long off,
extern int os_protect_memory(void *addr, unsigned long len,
int r, int w, int x);
extern int os_unmap_memory(void *addr, int len);
+extern int os_drop_memory(void *addr, int length);
+extern int can_drop_memory(void);
extern void os_flush_stdout(void);
/* tt.c
@@ -234,8 +238,12 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
int stack_order);
extern int helper_wait(int pid);
-/* umid.c */
+/* tls.c */
+extern int os_set_thread_area(user_desc_t *info, int pid);
+extern int os_get_thread_area(user_desc_t *info, int pid);
+
+/* umid.c */
extern int umid_file_name(char *name, char *buf, int len);
extern int set_umid(char *name);
extern char *get_umid(void);
diff --git a/trunk/arch/um/include/sysdep-i386/checksum.h b/trunk/arch/um/include/sysdep-i386/checksum.h
index 7d3d202d7fff..052bb061a978 100644
--- a/trunk/arch/um/include/sysdep-i386/checksum.h
+++ b/trunk/arch/um/include/sysdep-i386/checksum.h
@@ -48,7 +48,8 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
*/
static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
+ unsigned char *dst,
int len, int sum, int *err_ptr)
{
if(copy_from_user(dst, src, len)){
@@ -192,7 +193,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
*/
#define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
- unsigned char *dst,
+ unsigned char __user *dst,
int len, int sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len)){
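
The __user annotations added in this and several later hunks matter only to the sparse checker: __user tags a pointer as referring to the user address space, so dereferencing it directly or mixing it with plain kernel pointers gets flagged. A rough standalone sketch of the mechanism, with a stand-in definition of __user (the attribute is what the kernel uses under __CHECKER__; everything else here is illustrative):

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* A copy helper must take the annotated pointer type; handing a __user
 * pointer straight to memcpy() would draw a sparse warning. */
static long copy_in(void *dst, const void __user *src, unsigned long len)
{
	/* Stand-in for copy_from_user(); the real thing validates the address
	 * range and handles faults (and would use a __force cast here). */
	memcpy(dst, (const void *)src, len);
	return 0;
}

int main(void)
{
	char kbuf[8];
	const char __user *ubuf = (const char __user *)"hello";

	copy_in(kbuf, ubuf, 6);
	printf("%s\n", kbuf);
	return 0;
}
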
diff --git a/trunk/arch/um/include/sysdep-i386/ptrace.h b/trunk/arch/um/include/sysdep-i386/ptrace.h
index c8ee9559f3ab..6670cc992ecb 100644
--- a/trunk/arch/um/include/sysdep-i386/ptrace.h
+++ b/trunk/arch/um/include/sysdep-i386/ptrace.h
@@ -14,7 +14,12 @@
#define MAX_REG_NR (UM_FRAME_SIZE / sizeof(unsigned long))
#define MAX_REG_OFFSET (UM_FRAME_SIZE)
+#ifdef UML_CONFIG_PT_PROXY
extern void update_debugregs(int seq);
+#else
+static inline void update_debugregs(int seq) {}
+#endif
+
/* syscall emulation path in ptrace */
diff --git a/trunk/arch/um/include/sysdep-i386/tls.h b/trunk/arch/um/include/sysdep-i386/tls.h
new file mode 100644
index 000000000000..918fd3c5ff9c
--- /dev/null
+++ b/trunk/arch/um/include/sysdep-i386/tls.h
@@ -0,0 +1,32 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change the name to avoid conflicts with the host's original definition, which
+ * may be named user_desc (but in 2.4, and in the header matching its API, it was
+ * named modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+# include
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+
+#define GDT_ENTRY_TLS_MIN_I386 6
+#define GDT_ENTRY_TLS_MIN_X86_64 12
+
+#endif /* _SYSDEP_TLS_H */
diff --git a/trunk/arch/um/include/sysdep-x86_64/tls.h b/trunk/arch/um/include/sysdep-x86_64/tls.h
new file mode 100644
index 000000000000..35f19f25bd3b
--- /dev/null
+++ b/trunk/arch/um/include/sysdep-x86_64/tls.h
@@ -0,0 +1,29 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change the name to avoid conflicts with the host's original definition, which
+ * may be named user_desc (but in 2.4, and in the header matching its API, it was
+ * named modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ unsigned int lm:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+# include
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+#endif /* _SYSDEP_TLS_H */
diff --git a/trunk/arch/um/include/user_util.h b/trunk/arch/um/include/user_util.h
index 992a7e1e0fca..fe0c29b5144d 100644
--- a/trunk/arch/um/include/user_util.h
+++ b/trunk/arch/um/include/user_util.h
@@ -8,6 +8,9 @@
#include "sysdep/ptrace.h"
+/* Copied from kernel.h */
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
#define CATCH_EINTR(expr) while ((errno = 0, ((expr) < 0)) && (errno == EINTR))
extern int mode_tt;
@@ -31,7 +34,7 @@ extern unsigned long uml_physmem;
extern unsigned long uml_reserved;
extern unsigned long end_vm;
extern unsigned long start_vm;
-extern unsigned long highmem;
+extern unsigned long long highmem;
extern char host_info[];
diff --git a/trunk/arch/um/kernel/exec_kern.c b/trunk/arch/um/kernel/exec_kern.c
index 1ca84319317d..c0cb627bf594 100644
--- a/trunk/arch/um/kernel/exec_kern.c
+++ b/trunk/arch/um/kernel/exec_kern.c
@@ -22,6 +22,7 @@
void flush_thread(void)
{
+ arch_flush_thread(&current->thread.arch);
CHOOSE_MODE(flush_thread_tt(), flush_thread_skas());
}
@@ -58,14 +59,14 @@ long um_execve(char *file, char __user *__user *argv, char __user *__user *env)
return(err);
}
-long sys_execve(char *file, char __user *__user *argv,
+long sys_execve(char __user *file, char __user *__user *argv,
char __user *__user *env)
{
long error;
char *filename;
lock_kernel();
- filename = getname((char __user *) file);
+ filename = getname(file);
error = PTR_ERR(filename);
if (IS_ERR(filename)) goto out;
error = execve1(filename, argv, env);
@@ -74,14 +75,3 @@ long sys_execve(char *file, char __user *__user *argv,
unlock_kernel();
return(error);
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/kernel/mem.c b/trunk/arch/um/kernel/mem.c
index 92cce96b5e24..44e41a35f000 100644
--- a/trunk/arch/um/kernel/mem.c
+++ b/trunk/arch/um/kernel/mem.c
@@ -30,7 +30,7 @@ extern char __binary_start;
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-unsigned long highmem;
+unsigned long long highmem;
int kmalloc_ok = 0;
static unsigned long brk_end;
diff --git a/trunk/arch/um/kernel/process_kern.c b/trunk/arch/um/kernel/process_kern.c
index 3113cab8675e..f6a5a502120b 100644
--- a/trunk/arch/um/kernel/process_kern.c
+++ b/trunk/arch/um/kernel/process_kern.c
@@ -156,9 +156,25 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long stack_top, struct task_struct * p,
struct pt_regs *regs)
{
+ int ret;
+
p->thread = (struct thread_struct) INIT_THREAD;
- return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
- clone_flags, sp, stack_top, p, regs));
+ ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
+ clone_flags, sp, stack_top, p, regs);
+
+ if (ret || !current->thread.forking)
+ goto out;
+
+ clear_flushed_tls(p);
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS)
+ ret = arch_copy_tls(p);
+
+out:
+ return ret;
}
void initial_thread_cb(void (*proc)(void *), void *arg)
@@ -185,10 +201,6 @@ void default_idle(void)
{
CHOOSE_MODE(uml_idle_timer(), (void) 0);
- atomic_inc(&init_mm.mm_count);
- current->mm = &init_mm;
- current->active_mm = &init_mm;
-
while(1){
/* endless idle loop with no priority at all */
@@ -407,7 +419,7 @@ static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int
return strlen(buf);
}
-static int proc_write_sysemu(struct file *file,const char *buf, unsigned long count,void *data)
+static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
{
char tmp[2];
diff --git a/trunk/arch/um/kernel/ptrace.c b/trunk/arch/um/kernel/ptrace.c
index 98e09395c093..60d2eda995c1 100644
--- a/trunk/arch/um/kernel/ptrace.c
+++ b/trunk/arch/um/kernel/ptrace.c
@@ -46,6 +46,7 @@ extern int poke_user(struct task_struct * child, long addr, long data);
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int i, ret;
+ unsigned long __user *p = (void __user *)(unsigned long)data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -58,7 +59,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, p);
break;
}
@@ -136,15 +137,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_GETREGS
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
- if (!access_ok(VERIFY_WRITE, (unsigned long *)data,
- MAX_REG_OFFSET)) {
+ if (!access_ok(VERIFY_WRITE, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
- __put_user(getreg(child, i),
- (unsigned long __user *) data);
- data += sizeof(long);
+ __put_user(getreg(child, i), p);
+ p++;
}
ret = 0;
break;
@@ -153,15 +152,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_SETREGS
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
unsigned long tmp = 0;
- if (!access_ok(VERIFY_READ, (unsigned *)data,
- MAX_REG_OFFSET)) {
+ if (!access_ok(VERIFY_READ, p, MAX_REG_OFFSET)) {
ret = -EIO;
break;
}
for ( i = 0; i < MAX_REG_OFFSET; i += sizeof(long) ) {
- __get_user(tmp, (unsigned long __user *) data);
+ __get_user(tmp, p);
putreg(child, i, tmp);
- data += sizeof(long);
+ p++;
}
ret = 0;
break;
@@ -187,14 +185,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = set_fpxregs(data, child);
break;
#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = ptrace_get_thread_area(child, addr,
+ (struct user_desc __user *) data);
+ break;
+
+ case PTRACE_SET_THREAD_AREA:
+ ret = ptrace_set_thread_area(child, addr,
+ (struct user_desc __user *) data);
+ break;
+
case PTRACE_FAULTINFO: {
- /* Take the info from thread->arch->faultinfo,
- * but transfer max. sizeof(struct ptrace_faultinfo).
- * On i386, ptrace_faultinfo is smaller!
- */
- ret = copy_to_user((unsigned long __user *) data,
- &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo));
+ /* Take the info from thread->arch->faultinfo,
+ * but transfer max. sizeof(struct ptrace_faultinfo).
+ * On i386, ptrace_faultinfo is smaller!
+ */
+ ret = copy_to_user(p, &child->thread.arch.faultinfo,
+ sizeof(struct ptrace_faultinfo));
if(ret)
break;
break;
@@ -204,8 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_LDT: {
struct ptrace_ldt ldt;
- if(copy_from_user(&ldt, (unsigned long __user *) data,
- sizeof(ldt))){
+ if(copy_from_user(&ldt, p, sizeof(ldt))){
ret = -EIO;
break;
}
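
PTRACE_GET_THREAD_AREA and PTRACE_SET_THREAD_AREA, wired up in arch_ptrace() above, take the GDT entry index in addr and a struct user_desc pointer in data, matching the native i386 requests (25 and 26, as also hard-coded in os-Linux/tls.c below). A hedged sketch of a host-side tracer reading one TLS slot from a stopped child; error handling is trimmed, and entry index 6 is just the usual first i386 TLS slot:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <asm/ldt.h>			/* struct user_desc on x86 */

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent inspect us */
		_exit(0);
	}
	waitpid(child, NULL, 0);

	struct user_desc desc;
	memset(&desc, 0, sizeof(desc));

	/* addr = GDT entry index, data = user_desc to fill in. */
	if (ptrace(PTRACE_GET_THREAD_AREA, child, 6, &desc) == 0)
		printf("entry %u base 0x%x limit 0x%x\n",
		       desc.entry_number, desc.base_addr, desc.limit);
	else
		perror("PTRACE_GET_THREAD_AREA");

	ptrace(PTRACE_KILL, child, NULL, NULL);
	return 0;
}
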
diff --git a/trunk/arch/um/kernel/skas/process_kern.c b/trunk/arch/um/kernel/skas/process_kern.c
index 3f70a2e12f06..2135eaf98a93 100644
--- a/trunk/arch/um/kernel/skas/process_kern.c
+++ b/trunk/arch/um/kernel/skas/process_kern.c
@@ -35,6 +35,8 @@ void switch_to_skas(void *prev, void *next)
switch_threads(&from->thread.mode.skas.switch_buf,
to->thread.mode.skas.switch_buf);
+ arch_switch_to_skas(current->thread.prev_sched, current);
+
if(current->pid == 0)
switch_timers(1);
}
@@ -89,10 +91,17 @@ void fork_handler(int sig)
panic("blech");
schedule_tail(current->thread.prev_sched);
+
+ /* XXX: if interrupt_end() calls schedule, this call to
+ * arch_switch_to_skas isn't needed. We may want to rely on that to
+ * improve performance. -bb */
+ arch_switch_to_skas(current->thread.prev_sched, current);
+
current->thread.prev_sched = NULL;
/* Handle any immediate reschedules or signals */
interrupt_end();
+
 userspace(&current->thread.regs.regs);
}
@@ -109,6 +118,8 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
if(sp != 0) REGS_SP(p->thread.regs.regs.skas.regs) = sp;
handler = fork_handler;
+
+ arch_copy_thread(&current->thread.arch, &p->thread.arch);
}
else {
init_thread_registers(&p->thread.regs.regs);
diff --git a/trunk/arch/um/kernel/syscall_kern.c b/trunk/arch/um/kernel/syscall_kern.c
index 8e1a3501ff46..37d3978337d8 100644
--- a/trunk/arch/um/kernel/syscall_kern.c
+++ b/trunk/arch/um/kernel/syscall_kern.c
@@ -104,7 +104,7 @@ long sys_pipe(unsigned long __user * fildes)
}
-long sys_uname(struct old_utsname * name)
+long sys_uname(struct old_utsname __user * name)
{
long err;
if (!name)
@@ -115,7 +115,7 @@ long sys_uname(struct old_utsname * name)
return err?-EFAULT:0;
}
-long sys_olduname(struct oldold_utsname * name)
+long sys_olduname(struct oldold_utsname __user * name)
{
long error;
diff --git a/trunk/arch/um/kernel/trap_kern.c b/trunk/arch/um/kernel/trap_kern.c
index d56046c2aba2..02f6d4d8dc3a 100644
--- a/trunk/arch/um/kernel/trap_kern.c
+++ b/trunk/arch/um/kernel/trap_kern.c
@@ -198,7 +198,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
- si.si_addr = (void *)address;
+ si.si_addr = (void __user *)address;
current->thread.arch.faultinfo = fi;
force_sig_info(SIGBUS, &si, current);
} else if (err == -ENOMEM) {
@@ -207,7 +207,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
} else {
BUG_ON(err != -EFAULT);
si.si_signo = SIGSEGV;
- si.si_addr = (void *) address;
+ si.si_addr = (void __user *) address;
current->thread.arch.faultinfo = fi;
force_sig_info(SIGSEGV, &si, current);
}
@@ -220,8 +220,8 @@ void bad_segv(struct faultinfo fi, unsigned long ip)
si.si_signo = SIGSEGV;
si.si_code = SEGV_ACCERR;
- si.si_addr = (void *) FAULT_ADDRESS(fi);
- current->thread.arch.faultinfo = fi;
+ si.si_addr = (void __user *) FAULT_ADDRESS(fi);
+ current->thread.arch.faultinfo = fi;
force_sig_info(SIGSEGV, &si, current);
}
diff --git a/trunk/arch/um/kernel/tt/process_kern.c b/trunk/arch/um/kernel/tt/process_kern.c
index 295c1ac817b3..a9c1443fc548 100644
--- a/trunk/arch/um/kernel/tt/process_kern.c
+++ b/trunk/arch/um/kernel/tt/process_kern.c
@@ -51,6 +51,13 @@ void switch_to_tt(void *prev, void *next)
c = 0;
+ /* Notice that here we "up" the semaphore on which "to" is waiting, and
+ * below (the read) we wait on this semaphore (which is implemented by
+ * switch_pipe) and go to sleep. Thus, after that, we have resumed in
+ * "to", and can no longer use the value of "from" (which is outdated),
+ * nor the value of "to" (since it was the task which took the CPU from
+ * us, which we don't care about). */
+
err = os_write_file(to->thread.mode.tt.switch_pipe[1], &c, sizeof(c));
if(err != sizeof(c))
panic("write of switch_pipe failed, err = %d", -err);
@@ -77,7 +84,7 @@ void switch_to_tt(void *prev, void *next)
change_sig(SIGALRM, alrm);
change_sig(SIGPROF, prof);
- arch_switch();
+ arch_switch_to_tt(prev_sched, current);
flush_tlb_all();
local_irq_restore(flags);
@@ -141,7 +148,6 @@ static void new_thread_handler(int sig)
set_cmdline("(kernel thread)");
change_sig(SIGUSR1, 1);
- change_sig(SIGVTALRM, 1);
change_sig(SIGPROF, 1);
local_irq_enable();
 if(!run_kernel_thread(fn, arg, &current->thread.exec_buf))
diff --git a/trunk/arch/um/os-Linux/Makefile b/trunk/arch/um/os-Linux/Makefile
index 1659386b42bb..f4bfc4c7ccac 100644
--- a/trunk/arch/um/os-Linux/Makefile
+++ b/trunk/arch/um/os-Linux/Makefile
@@ -4,7 +4,7 @@
#
obj-y = aio.o elf_aux.o file.o helper.o irq.o main.o mem.o process.o sigio.o \
- signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o \
+ signal.o start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o tls.o \
user_syms.o util.o drivers/ sys-$(SUBARCH)/
obj-$(CONFIG_MODE_SKAS) += skas/
@@ -12,12 +12,9 @@ obj-$(CONFIG_TTY_LOG) += tty_log.o
user-objs-$(CONFIG_TTY_LOG) += tty_log.o
USER_OBJS := $(user-objs-y) aio.o elf_aux.o file.o helper.o irq.o main.o mem.o \
- process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o \
+ process.o sigio.o signal.o start_up.o time.o trap.o tt.o tty.o tls.o \
uaccess.o umid.o util.o
-elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
-CFLAGS_elf_aux.o += -I$(objtree)/arch/um
-
CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \
diff --git a/trunk/arch/um/os-Linux/drivers/ethertap_kern.c b/trunk/arch/um/os-Linux/drivers/ethertap_kern.c
index 6ae4b19d9f50..768606bec233 100644
--- a/trunk/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/trunk/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -102,18 +102,7 @@ static struct transport ethertap_transport = {
static int register_ethertap(void)
{
 register_transport(&ethertap_transport);
- return(1);
+ return 0;
}
__initcall(register_ethertap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/os-Linux/drivers/tuntap_kern.c b/trunk/arch/um/os-Linux/drivers/tuntap_kern.c
index 4202b9ebad4c..190009a6f89c 100644
--- a/trunk/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/trunk/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -87,18 +87,7 @@ static struct transport tuntap_transport = {
static int register_tuntap(void)
{
register_transport(&tuntap_transport);
- return(1);
+ return 0;
}
__initcall(register_tuntap);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/trunk/arch/um/os-Linux/mem.c b/trunk/arch/um/os-Linux/mem.c
index 9d7d69a523bb..6ab372da9657 100644
--- a/trunk/arch/um/os-Linux/mem.c
+++ b/trunk/arch/um/os-Linux/mem.c
@@ -121,36 +121,11 @@ int create_tmp_file(unsigned long long len)
return(fd);
}
-static int create_anon_file(unsigned long long len)
-{
- void *addr;
- int fd;
-
- fd = open("/dev/anon", O_RDWR);
- if(fd < 0) {
- perror("opening /dev/anon");
- exit(1);
- }
-
- addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
- if(addr == MAP_FAILED){
- perror("mapping physmem file");
- exit(1);
- }
- munmap(addr, len);
-
- return(fd);
-}
-
-extern int have_devanon;
-
int create_mem_file(unsigned long long len)
{
int err, fd;
- if(have_devanon)
- fd = create_anon_file(len);
- else fd = create_tmp_file(len);
+ fd = create_tmp_file(len);
err = os_set_exec_close(fd, 1);
if(err < 0){
diff --git a/trunk/arch/um/os-Linux/process.c b/trunk/arch/um/os-Linux/process.c
index d261888f39c4..8176b0b52047 100644
--- a/trunk/arch/um/os-Linux/process.c
+++ b/trunk/arch/um/os-Linux/process.c
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#include "ptrace_user.h"
#include "os.h"
#include "user.h"
@@ -20,6 +21,7 @@
#include "kern_util.h"
#include "longjmp.h"
#include "skas_ptrace.h"
+#include "kern_constants.h"
#define ARBITRARY_ADDR -1
#define FAILURE_PID -1
@@ -187,6 +189,48 @@ int os_unmap_memory(void *addr, int len)
return(0);
}
+#ifndef MADV_REMOVE
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
+#endif
+
+int os_drop_memory(void *addr, int length)
+{
+ int err;
+
+ err = madvise(addr, length, MADV_REMOVE);
+ if(err < 0)
+ err = -errno;
+ return err;
+}
+
+int can_drop_memory(void)
+{
+ void *addr;
+ int fd;
+
+ printk("Checking host MADV_REMOVE support...");
+ fd = create_mem_file(UM_KERN_PAGE_SIZE);
+ if(fd < 0){
+ printk("Creating test memory file failed, err = %d\n", -fd);
+ return 0;
+ }
+
+ addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, fd, 0);
+ if(addr == MAP_FAILED){
+ printk("Mapping test memory file failed, err = %d\n", -errno);
+ return 0;
+ }
+
+ if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){
+ printk("MADV_REMOVE failed, err = %d\n", -errno);
+ return 0;
+ }
+
+ printk("OK\n");
+ return 1;
+}
+
void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
{
int flags = 0, pages;
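
can_drop_memory() above decides whether memory hotplug can be offered by mapping a page of a freshly created memory file and trying madvise(MADV_REMOVE) on it; the operation only succeeds on filesystems that support hole punching, such as tmpfs. A standalone sketch of the same probe (assumptions: /dev/shm is tmpfs, and the MADV_REMOVE fallback value 0x5 from the patch; the patch itself maps MAP_PRIVATE, a shared mapping is used here for clarity):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MADV_REMOVE
#define MADV_REMOVE 0x5		/* same fallback value as the patch */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char path[] = "/dev/shm/madv-probe-XXXXXX";	/* tmpfs on most hosts */
	int fd = mkstemp(path);

	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("creating probe file");
		return 1;
	}
	unlink(path);

	void *addr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (madvise(addr, page, MADV_REMOVE) != 0) {
		perror("MADV_REMOVE");	/* EINVAL on hosts without support */
		return 1;
	}

	printf("MADV_REMOVE supported - memory can be returned to the host\n");
	return 0;
}
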
diff --git a/trunk/arch/um/os-Linux/start_up.c b/trunk/arch/um/os-Linux/start_up.c
index 32753131f8d8..387e26af301a 100644
--- a/trunk/arch/um/os-Linux/start_up.c
+++ b/trunk/arch/um/os-Linux/start_up.c
@@ -470,25 +470,6 @@ int can_do_skas(void)
}
#endif
-int have_devanon = 0;
-
-/* Runs on boot kernel stack - already safe to use printk. */
-
-void check_devanon(void)
-{
- int fd;
-
- printk("Checking for /dev/anon on the host...");
- fd = open("/dev/anon", O_RDWR);
- if(fd < 0){
- printk("Not available (open failed with errno %d)\n", errno);
- return;
- }
-
- printk("OK\n");
- have_devanon = 1;
-}
-
int __init parse_iomem(char *str, int *add)
{
struct iomem_region *new;
@@ -664,6 +645,5 @@ void os_check_bugs(void)
{
check_ptrace();
check_sigio();
- check_devanon();
}
diff --git a/trunk/arch/um/os-Linux/sys-i386/Makefile b/trunk/arch/um/os-Linux/sys-i386/Makefile
index 340ef26f5944..b3213613c41c 100644
--- a/trunk/arch/um/os-Linux/sys-i386/Makefile
+++ b/trunk/arch/um/os-Linux/sys-i386/Makefile
@@ -3,7 +3,7 @@
# Licensed under the GPL
#
-obj-$(CONFIG_MODE_SKAS) = registers.o
+obj-$(CONFIG_MODE_SKAS) = registers.o tls.o
USER_OBJS := $(obj-y)
diff --git a/trunk/arch/um/os-Linux/sys-i386/tls.c b/trunk/arch/um/os-Linux/sys-i386/tls.c
new file mode 100644
index 000000000000..ba21f0e04a2f
--- /dev/null
+++ b/trunk/arch/um/os-Linux/sys-i386/tls.c
@@ -0,0 +1,33 @@
+#include
+#include "sysdep/tls.h"
+#include "user_util.h"
+
+static _syscall1(int, get_thread_area, user_desc_t *, u_info);
+
+/* Checks whether the host supports TLS, and sets *tls_min according to the
+ * value valid on the host.
+ * An i386 host has it == 6; an x86_64 host has it == 12, for i386 emulation. */
+void check_host_supports_tls(int *supports_tls, int *tls_min) {
+ /* Values for x86 and x86_64.*/
+ int val[] = {GDT_ENTRY_TLS_MIN_I386, GDT_ENTRY_TLS_MIN_X86_64};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ user_desc_t info;
+ info.entry_number = val[i];
+
+ if (get_thread_area(&info) == 0) {
+ *tls_min = val[i];
+ *supports_tls = 1;
+ return;
+ } else {
+ if (errno == EINVAL)
+ continue;
+ else if (errno == ENOSYS)
+ *supports_tls = 0;
+ return;
+ }
+ }
+
+ *supports_tls = 0;
+}
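
The detection logic above keys off the errno from get_thread_area: EINVAL for a rejected entry index means "wrong slot, try the next candidate", while ENOSYS means the host has no TLS support at all. A hedged standalone version of the same probe using the raw syscall (i386 syscall number 244 assumed; in the patch the _syscall1 stub above does this):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>			/* struct user_desc */

#ifndef __NR_get_thread_area
#define __NR_get_thread_area 244	/* i386 syscall number */
#endif

int main(void)
{
	/* First TLS GDT entry: 6 on an i386 host, 12 for 32-bit processes on
	 * an x86_64 host - the same candidates the patch tries. */
	int candidates[] = { 6, 12 };
	unsigned i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		struct user_desc info;

		memset(&info, 0, sizeof(info));
		info.entry_number = candidates[i];

		if (syscall(__NR_get_thread_area, &info) == 0) {
			printf("host TLS found, GDT_ENTRY_TLS_MIN = %d\n",
			       candidates[i]);
			return 0;
		}
		if (errno == ENOSYS)
			break;		/* no TLS support at all */
		/* EINVAL: wrong slot for this host, try the next candidate. */
	}
	printf("no usable host TLS support\n");
	return 1;
}
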
diff --git a/trunk/arch/um/os-Linux/tls.c b/trunk/arch/um/os-Linux/tls.c
new file mode 100644
index 000000000000..9cb09a45546b
--- /dev/null
+++ b/trunk/arch/um/os-Linux/tls.c
@@ -0,0 +1,76 @@
+#include
+#include
+#include
+#include "sysdep/tls.h"
+#include "uml-config.h"
+
+/* TLS support - we basically rely on the host's TLS support. */
+
+/* In TT mode, this should be called only by the tracing thread, and it makes
+ * sense only for PTRACE_SET_THREAD_AREA. In SKAS mode, it is used normally.
+ */
+
+#ifndef PTRACE_GET_THREAD_AREA
+#define PTRACE_GET_THREAD_AREA 25
+#endif
+
+#ifndef PTRACE_SET_THREAD_AREA
+#define PTRACE_SET_THREAD_AREA 26
+#endif
+
+int os_set_thread_area(user_desc_t *info, int pid)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_SET_THREAD_AREA, pid, info->entry_number,
+ (unsigned long) info);
+ if (ret < 0)
+ ret = -errno;
+ return ret;
+}
+
+#ifdef UML_CONFIG_MODE_SKAS
+
+int os_get_thread_area(user_desc_t *info, int pid)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_GET_THREAD_AREA, pid, info->entry_number,
+ (unsigned long) info);
+ if (ret < 0)
+ ret = -errno;
+ return ret;
+}
+
+#endif
+
+#ifdef UML_CONFIG_MODE_TT
+#include "linux/unistd.h"
+
+static _syscall1(int, get_thread_area, user_desc_t *, u_info);
+static _syscall1(int, set_thread_area, user_desc_t *, u_info);
+
+int do_set_thread_area_tt(user_desc_t *info)
+{
+ int ret;
+
+ ret = set_thread_area(info);
+ if (ret < 0) {
+ ret = -errno;
+ }
+ return ret;
+}
+
+int do_get_thread_area_tt(user_desc_t *info)
+{
+ int ret;
+
+ ret = get_thread_area(info);
+ if (ret < 0) {
+ ret = -errno;
+ }
+ return ret;
+}
+
+#endif /* UML_CONFIG_MODE_TT */
diff --git a/trunk/arch/um/scripts/Makefile.rules b/trunk/arch/um/scripts/Makefile.rules
index 2e41cabd3d93..b696b451774c 100644
--- a/trunk/arch/um/scripts/Makefile.rules
+++ b/trunk/arch/um/scripts/Makefile.rules
@@ -20,25 +20,7 @@ define unprofile
$(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1)))
endef
-
-# cmd_make_link checks to see if the $(foo-dir) variable starts with a /. If
-# so, it's considered to be a path relative to $(srcdir) rather than
-# $(srcdir)/arch/$(SUBARCH). This is because x86_64 wants to get ldt.c from
-# arch/um/sys-i386 rather than arch/i386 like the other borrowed files. So,
-# it sets $(ldt.c-dir) to /arch/um/sys-i386.
-quiet_cmd_make_link = SYMLINK $@
-cmd_make_link = rm -f $@; ln -sf $(srctree)$(if $(filter-out /%,$($(notdir $@)-dir)),/arch/$(SUBARCH))/$($(notdir $@)-dir)/$(notdir $@) $@
-
-# this needs to be before the foreach, because targets does not accept
-# complete paths like $(obj)/$(f). To make sure this works, use a := assignment
-# or we will get $(obj)/$(f) in the "targets" value.
-# Also, this forces you to use the := syntax when assigning to targets.
-# Otherwise the line below will cause an infinite loop (if you don't know why,
-# just do it).
-
-targets := $(targets) $(SYMLINKS)
-
-SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$(f))
-
-$(SYMLINKS): FORCE
- $(call if_changed,make_link)
+ifdef subarch-obj-y
+obj-y += subarch.o
+subarch-y = $(addprefix ../../$(SUBARCH)/,$(subarch-obj-y))
+endif
diff --git a/trunk/arch/um/scripts/Makefile.unmap b/trunk/arch/um/scripts/Makefile.unmap
deleted file mode 100644
index b2165188d942..000000000000
--- a/trunk/arch/um/scripts/Makefile.unmap
+++ /dev/null
@@ -1,22 +0,0 @@
-clean-files += unmap_tmp.o unmap_fin.o unmap.o
-
-ifdef CONFIG_MODE_TT
-
-#Always build unmap_fin.o
-extra-y += unmap_fin.o
-#Do dependency tracking for unmap.o (it will be always built, but won't get the tracking unless we use this).
-targets += unmap.o
-
-#XXX: partially copied from arch/um/scripts/Makefile.rules
-$(obj)/unmap.o: _c_flags = $(call unprofile,$(CFLAGS))
-
-quiet_cmd_wrapld = LD $@
-define cmd_wrapld
- $(LD) $(LDFLAGS) -r -o $(obj)/unmap_tmp.o $< ; \
- $(OBJCOPY) $(UML_OBJCOPYFLAGS) $(obj)/unmap_tmp.o $@ -G switcheroo
-endef
-
-$(obj)/unmap_fin.o : $(obj)/unmap.o FORCE
- $(call if_changed,wrapld)
-
-endif
diff --git a/trunk/arch/um/sys-i386/Makefile b/trunk/arch/um/sys-i386/Makefile
index f5fd5b0156d0..98b20b7bba4f 100644
--- a/trunk/arch/um/sys-i386/Makefile
+++ b/trunk/arch/um/sys-i386/Makefile
@@ -1,23 +1,18 @@
-obj-y := bitops.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
- ptrace_user.o semaphore.o signal.o sigcontext.o syscalls.o sysrq.o \
- sys_call_table.o
+obj-y = bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
+ ptrace_user.o signal.o sigcontext.o syscalls.o sysrq.o \
+ sys_call_table.o tls.o
obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
-obj-$(CONFIG_MODULES) += module.o
+subarch-obj-y = lib/bitops.o kernel/semaphore.o
+subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o stub_segv.o
-SYMLINKS = bitops.c semaphore.c highmem.c module.c
-
include arch/um/scripts/Makefile.rules
-bitops.c-dir = lib
-semaphore.c-dir = kernel
-highmem.c-dir = mm
-module.c-dir = kernel
-
-$(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS))
+extra-$(CONFIG_MODE_TT) += unmap.o
-include arch/um/scripts/Makefile.unmap
+$(obj)/stub_segv.o $(obj)/unmap.o: \
+ _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/trunk/arch/um/sys-i386/ptrace.c b/trunk/arch/um/sys-i386/ptrace.c
index 8032a105949a..6028bc7cc01b 100644
--- a/trunk/arch/um/sys-i386/ptrace.c
+++ b/trunk/arch/um/sys-i386/ptrace.c
@@ -15,9 +15,22 @@
#include "sysdep/sigcontext.h"
#include "sysdep/sc.h"
-void arch_switch(void)
+void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
{
- update_debugregs(current->thread.arch.debugregs_seq);
+ update_debugregs(to->thread.arch.debugregs_seq);
+ arch_switch_tls_tt(from, to);
+}
+
+void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
+{
+ int err = arch_switch_tls_skas(from, to);
+ if (!err)
+ return;
+
+ if (err != -EINVAL)
+ printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
+ else
+ printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
}
int is_syscall(unsigned long addr)
@@ -124,22 +137,22 @@ unsigned long getreg(struct task_struct *child, int regno)
int peek_user(struct task_struct *child, long addr, long data)
{
/* read the word at location addr in the USER area. */
- unsigned long tmp;
+ unsigned long tmp;
- if ((addr & 3) || addr < 0)
- return -EIO;
+ if ((addr & 3) || addr < 0)
+ return -EIO;
- tmp = 0; /* Default return condition */
- if(addr < MAX_REG_OFFSET){
- tmp = getreg(child, addr);
- }
- else if((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
- addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
- tmp = child->thread.arch.debugregs[addr];
- }
- return put_user(tmp, (unsigned long *) data);
+ tmp = 0; /* Default return condition */
+ if(addr < MAX_REG_OFFSET){
+ tmp = getreg(child, addr);
+ }
+ else if((addr >= offsetof(struct user, u_debugreg[0])) &&
+ (addr <= offsetof(struct user, u_debugreg[7]))){
+ addr -= offsetof(struct user, u_debugreg[0]);
+ addr = addr >> 2;
+ tmp = child->thread.arch.debugregs[addr];
+ }
+ return put_user(tmp, (unsigned long __user *) data);
}
struct i387_fxsave_struct {
diff --git a/trunk/arch/um/sys-i386/ptrace_user.c b/trunk/arch/um/sys-i386/ptrace_user.c
index 7c376c95de50..9f3bd8ed78f5 100644
--- a/trunk/arch/um/sys-i386/ptrace_user.c
+++ b/trunk/arch/um/sys-i386/ptrace_user.c
@@ -14,6 +14,7 @@
#include "sysdep/thread.h"
#include "user.h"
#include "os.h"
+#include "uml-config.h"
int ptrace_getregs(long pid, unsigned long *regs_out)
{
@@ -43,6 +44,7 @@ int ptrace_setfpregs(long pid, unsigned long *regs)
return 0;
}
+/* Everything below is of interest for TT mode only */
static void write_debugregs(int pid, unsigned long *regs)
{
struct user *dummy;
@@ -75,7 +77,6 @@ static void read_debugregs(int pid, unsigned long *regs)
/* Accessed only by the tracing thread */
static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
-static int debugregs_seq = 0;
void arch_enter_kernel(void *task, int pid)
{
@@ -89,6 +90,11 @@ void arch_leave_kernel(void *task, int pid)
write_debugregs(pid, TASK_DEBUGREGS(task));
}
+#ifdef UML_CONFIG_PT_PROXY
+/* Accessed only by the tracing thread */
+static int debugregs_seq;
+
+/* Only called by the ptrace proxy */
void ptrace_pokeuser(unsigned long addr, unsigned long data)
{
if((addr < offsetof(struct user, u_debugreg[0])) ||
@@ -109,6 +115,7 @@ static void update_debugregs_cb(void *arg)
write_debugregs(pid, kernel_debugregs);
}
+/* Stubbed out by an empty inline in its header when UML_CONFIG_PT_PROXY is unset */
void update_debugregs(int seq)
{
int me;
@@ -118,6 +125,7 @@ void update_debugregs(int seq)
me = os_getpid();
initial_thread_cb(update_debugregs_cb, &me);
}
+#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
diff --git a/trunk/arch/um/sys-i386/signal.c b/trunk/arch/um/sys-i386/signal.c
index 33a40f5ef0d2..f5d0e1c37ea2 100644
--- a/trunk/arch/um/sys-i386/signal.c
+++ b/trunk/arch/um/sys-i386/signal.c
@@ -19,7 +19,7 @@
#include "skas.h"
static int copy_sc_from_user_skas(struct pt_regs *regs,
- struct sigcontext *from)
+ struct sigcontext __user *from)
{
struct sigcontext sc;
unsigned long fpregs[HOST_FP_SIZE];
@@ -57,7 +57,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs,
return(0);
}
-int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
+int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate __user *to_fp,
struct pt_regs *regs, unsigned long sp)
{
struct sigcontext sc;
@@ -92,7 +92,7 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
"errno = %d\n", err);
return(1);
}
- to_fp = (to_fp ? to_fp : (struct _fpstate *) (to + 1));
+ to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
sc.fpstate = to_fp;
if(err)
@@ -113,10 +113,11 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
* saved pointer is in the kernel, but the sigcontext is in userspace, so we
* copy_to_user it.
*/
-int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
+int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
int fpsize)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate *to_fp;
+ struct _fpstate __user *from_fp;
unsigned long sigs;
int err;
@@ -131,13 +132,14 @@ int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
return(err);
}
-int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
+int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate __user *fp,
struct sigcontext *from, int fpsize, unsigned long sp)
{
- struct _fpstate *to_fp, *from_fp;
+ struct _fpstate __user *to_fp;
+ struct _fpstate *from_fp;
int err;
- to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
+ to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
from_fp = from->fpstate;
err = copy_to_user(to, from, sizeof(*to));
@@ -165,7 +167,7 @@ static int copy_sc_from_user(struct pt_regs *to, void __user *from)
return(ret);
}
-static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
+static int copy_sc_to_user(struct sigcontext *to, struct _fpstate __user *fp,
struct pt_regs *from, unsigned long sp)
{
return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs),
@@ -173,7 +175,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
copy_sc_to_user_skas(to, fp, from, sp)));
}
-static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
+static int copy_ucontext_to_user(struct ucontext __user *uc, struct _fpstate __user *fp,
sigset_t *set, unsigned long sp)
{
int err = 0;
@@ -188,7 +190,7 @@ static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp,
struct sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
@@ -198,10 +200,10 @@ struct sigframe
struct rt_sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
- struct siginfo *pinfo;
- void *puc;
+ struct siginfo __user *pinfo;
+ void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
@@ -213,16 +215,16 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
sigset_t *mask)
{
struct sigframe __user *frame;
- void *restorer;
+ void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
stack_top &= -8UL;
- frame = (struct sigframe *) stack_top - 1;
+ frame = (struct sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
- restorer = (void *) frame->retcode;
+ restorer = frame->retcode;
if(ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -278,16 +280,16 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
siginfo_t *info, sigset_t *mask)
{
struct rt_sigframe __user *frame;
- void *restorer;
+ void __user *restorer;
unsigned long save_sp = PT_REGS_SP(regs);
int err = 0;
stack_top &= -8UL;
- frame = (struct rt_sigframe *) stack_top - 1;
+ frame = (struct rt_sigframe __user *) stack_top - 1;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return 1;
- restorer = (void *) frame->retcode;
+ restorer = frame->retcode;
if(ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -333,7 +335,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
long sys_sigreturn(struct pt_regs regs)
{
 unsigned long sp = PT_REGS_SP(&current->thread.regs);
- struct sigframe __user *frame = (struct sigframe *)(sp - 8);
+ struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
@@ -365,8 +367,8 @@ long sys_sigreturn(struct pt_regs regs)
long sys_rt_sigreturn(struct pt_regs regs)
{
- unsigned long __user sp = PT_REGS_SP(&current->thread.regs);
- struct rt_sigframe __user *frame = (struct rt_sigframe *) (sp - 4);
+ unsigned long sp = PT_REGS_SP(&current->thread.regs);
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (sp - 4);
sigset_t set;
struct ucontext __user *uc = &frame->uc;
int sig_size = _NSIG_WORDS * sizeof(unsigned long);
diff --git a/trunk/arch/um/sys-i386/sys_call_table.S b/trunk/arch/um/sys-i386/sys_call_table.S
index ad75c27afe38..1ff61474b25c 100644
--- a/trunk/arch/um/sys-i386/sys_call_table.S
+++ b/trunk/arch/um/sys-i386/sys_call_table.S
@@ -6,8 +6,6 @@
#define sys_vm86old sys_ni_syscall
#define sys_vm86 sys_ni_syscall
-#define sys_set_thread_area sys_ni_syscall
-#define sys_get_thread_area sys_ni_syscall
#define sys_stime um_stime
#define sys_time um_time
diff --git a/trunk/arch/um/sys-i386/syscalls.c b/trunk/arch/um/sys-i386/syscalls.c
index 83e9be820a86..749dd1bfe60f 100644
--- a/trunk/arch/um/sys-i386/syscalls.c
+++ b/trunk/arch/um/sys-i386/syscalls.c
@@ -61,21 +61,27 @@ long old_select(struct sel_arg_struct __user *arg)
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
-/* The i386 version skips reading from %esi, the fourth argument. So we must do
- * this, too.
+/*
+ * The prototype on i386 is:
+ *
+ * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *
+ * and the "newtls" arg. on i386 is read by copy_thread directly from the
+ * register saved on the stack.
*/
long sys_clone(unsigned long clone_flags, unsigned long newsp,
- int __user *parent_tid, int unused, int __user *child_tid)
+ int __user *parent_tid, void *newtls, int __user *child_tid)
{
long ret;
if (!newsp)
 newsp = UPT_SP(&current->thread.regs.regs);
+
current->thread.forking = 1;
ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid,
child_tid);
current->thread.forking = 0;
- return(ret);
+ return ret;
}
/*
@@ -104,7 +110,7 @@ long sys_ipc (uint call, int first, int second,
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void **) ptr))
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
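
The new fourth argument of sys_clone() above is essentially a placeholder for the register slot: on i386 the CLONE_SETTLS descriptor is not taken from the argument list but from the saved register block that copy_thread()/arch_copy_tls() later inspects (%esi, as the comment says). From user space the flow looks like the sketch below, which uses glibc's clone() wrapper to pass a struct user_desc as the new TLS (names and sizes are illustrative; an i386 build is assumed):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <asm/ldt.h>			/* struct user_desc */

static char child_stack[64 * 1024];
static char tls_block[256];

static int child_fn(void *arg)
{
	/* The kernel installed the descriptor before this runs, so
	 * %gs-relative accesses in the child would hit tls_block. */
	return 0;
}

int main(void)
{
	struct user_desc desc;
	int pid;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;			/* let the kernel pick a slot */
	desc.base_addr = (unsigned long)tls_block;
	desc.limit = sizeof(tls_block) - 1;
	desc.seg_32bit = 1;
	desc.useable = 1;

	/* glibc's wrapper forwards the tls pointer into the register that
	 * copy_thread reads on i386. */
	pid = clone(child_fn, child_stack + sizeof(child_stack),
		    CLONE_SETTLS | SIGCHLD, NULL,
		    NULL /* parent_tid */, &desc /* newtls */,
		    NULL /* child_tid */);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	return 0;
}
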
diff --git a/trunk/arch/um/sys-i386/tls.c b/trunk/arch/um/sys-i386/tls.c
new file mode 100644
index 000000000000..a3188e861cc7
--- /dev/null
+++ b/trunk/arch/um/sys-i386/tls.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso
+ * Licensed under the GPL
+ */
+
+#include "linux/config.h"
+#include "linux/kernel.h"
+#include "linux/sched.h"
+#include "linux/slab.h"
+#include "linux/types.h"
+#include "asm/uaccess.h"
+#include "asm/ptrace.h"
+#include "asm/segment.h"
+#include "asm/smp.h"
+#include "asm/desc.h"
+#include "choose-mode.h"
+#include "kern.h"
+#include "kern_util.h"
+#include "mode_kern.h"
+#include "os.h"
+#include "mode.h"
+
+#ifdef CONFIG_MODE_SKAS
+#include "skas.h"
+#endif
+
+/* If needed we can detect when it's uninitialized. */
+static int host_supports_tls = -1;
+int host_gdt_entry_tls_min = -1;
+
+#ifdef CONFIG_MODE_SKAS
+int do_set_thread_area_skas(struct user_desc *info)
+{
+ int ret;
+ u32 cpu;
+
+ cpu = get_cpu();
+ ret = os_set_thread_area(info, userspace_pid[cpu]);
+ put_cpu();
+ return ret;
+}
+
+int do_get_thread_area_skas(struct user_desc *info)
+{
+ int ret;
+ u32 cpu;
+
+ cpu = get_cpu();
+ ret = os_get_thread_area(info, userspace_pid[cpu]);
+ put_cpu();
+ return ret;
+}
+#endif
+
+/*
+ * sys_get_thread_area: get a yet unused TLS descriptor index.
+ * XXX: Consider leaving one free slot for glibc usage in the first place. This must
+ * be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
+ *
+ * Also, this must be tested when compiling in SKAS mode with dynamic linking
+ * and running against NPTL.
+ */
+static int get_free_idx(struct task_struct* task)
+{
+ struct thread_struct *t = &task->thread;
+ int idx;
+
+ if (!t->arch.tls_array)
+ return GDT_ENTRY_TLS_MIN;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (!t->arch.tls_array[idx].present)
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+}
+
+static inline void clear_user_desc(struct user_desc* info)
+{
+ /* Postcondition: LDT_empty(info) returns true. */
+ memset(info, 0, sizeof(*info));
+
+ /* Check the LDT_empty or the i386 sys_get_thread_area code - this indeed
+ * yields an empty user_desc.
+ */
+ info->read_exec_only = 1;
+ info->seg_not_present = 1;
+}
+
+#define O_FORCE 1
+
+static int load_TLS(int flags, struct task_struct *to)
+{
+ int ret = 0;
+ int idx;
+
+ for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
+ struct uml_tls_struct* curr = &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];
+
+ /* Now, if the entry was not flushed, it gets cleared here and then
+ * flushed to the host, which will clear it. */
+ if (!curr->present) {
+ if (!curr->flushed) {
+ clear_user_desc(&curr->tls);
+ curr->tls.entry_number = idx;
+ } else {
+ WARN_ON(!LDT_empty(&curr->tls));
+ continue;
+ }
+ }
+
+ if (!(flags & O_FORCE) && curr->flushed)
+ continue;
+
+ ret = do_set_thread_area(&curr->tls);
+ if (ret)
+ goto out;
+
+ curr->flushed = 1;
+ }
+out:
+ return ret;
+}
+
+/* Check whether the new process needs a TLS flush, i.e. whether any
+ * descriptor is still waiting to be flushed to the host.
+ */
+static inline int needs_TLS_update(struct task_struct *task)
+{
+ int i;
+ int ret = 0;
+
+ for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+ struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+ /* Can't test curr->present, we may need to clear a descriptor
+ * which had a value. */
+ if (curr->flushed)
+ continue;
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
+ * we mark them as such and the first switch_to will do the job.
+ */
+void clear_flushed_tls(struct task_struct *task)
+{
+ int i;
+
+ for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
+ struct uml_tls_struct* curr = &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];
+
+ /* Still correct to do this, if it wasn't present on the host it
+ * will remain as flushed as it was. */
+ if (!curr->present)
+ continue;
+
+ curr->flushed = 0;
+ }
+}
+
+/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
+ * common host process. So this is needed in SKAS0 too.
+ *
+ * However, if each thread had a different host process (and this was discussed
+ * for SMP support) this won't be needed.
+ *
+ * And this will not need to be used if (and when) we add support for the host
+ * SKAS patch. */
+
+int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
+{
+ if (!host_supports_tls)
+ return 0;
+
+ /* We have no need whatsoever to switch TLS for kernel threads; doing so
+ * would also result in calling os_set_thread_area with
+ * userspace_pid[cpu] == 0, which gives an error. */
+ if (likely(to->mm))
+ return load_TLS(O_FORCE, to);
+
+ return 0;
+}
+
+int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
+{
+ if (!host_supports_tls)
+ return 0;
+
+ if (needs_TLS_update(to))
+ return load_TLS(0, to);
+
+ return 0;
+}
+
+static int set_tls_entry(struct task_struct* task, struct user_desc *info,
+ int idx, int flushed)
+{
+ struct thread_struct *t = &task->thread;
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
+ t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;
+
+ return 0;
+}
+
+int arch_copy_tls(struct task_struct *new)
+{
+ struct user_desc info;
+ int idx, ret = -EFAULT;
+
+ if (copy_from_user(&info,
+ (void __user *) UPT_ESI(&new->thread.regs.regs),
+ sizeof(info)))
+ goto out;
+
+ ret = -EINVAL;
+ if (LDT_empty(&info))
+ goto out;
+
+ idx = info.entry_number;
+
+ ret = set_tls_entry(new, &info, idx, 0);
+out:
+ return ret;
+}
+
+/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
+static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
+{
+ struct thread_struct *t = &task->thread;
+
+ if (!t->arch.tls_array)
+ goto clear;
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
+ goto clear;
+
+ *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;
+
+out:
+ /* Temporary debugging check, to make sure that things have been
+ * flushed. This could be triggered if load_TLS() failed.
+ */
+ if (unlikely(task == current && !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
+ printk(KERN_ERR "get_tls_entry: task with pid %d got here "
+ "without flushed TLS.", current->pid);
+ }
+
+ return 0;
+clear:
+ /* When the TLS entry has not been set, the values returned to the user
+ * from the tls_array are 0 (because it's cleared at boot, see
+ * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
+ */
+ clear_user_desc(info);
+ info->entry_number = idx;
+ goto out;
+}
+
+asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int idx, ret;
+
+ if (!host_supports_tls)
+ return -ENOSYS;
+
+ if (copy_from_user(&info, user_desc, sizeof(info)))
+ return -EFAULT;
+
+ idx = info.entry_number;
+
+ if (idx == -1) {
+ idx = get_free_idx(current);
+ if (idx < 0)
+ return idx;
+ info.entry_number = idx;
+ /* Tell the user which slot we chose for him.*/
+ if (put_user(idx, &user_desc->entry_number))
+ return -EFAULT;
+ }
+
+ ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas, &info);
+ if (ret)
+ return ret;
+ return set_tls_entry(current, &info, idx, 1);
+}
+
+/*
+ * Perform set_thread_area on behalf of the traced child.
+ * Note: error handling is not done on the deferred load, and this differs from
+ * i386. However, the only possible errors are caused by bugs.
+ */
+int ptrace_set_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+
+ if (!host_supports_tls)
+ return -EIO;
+
+ if (copy_from_user(&info, user_desc, sizeof(info)))
+ return -EFAULT;
+
+ return set_tls_entry(child, &info, idx, 0);
+}
+
+asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int idx, ret;
+
+ if (!host_supports_tls)
+ return -ENOSYS;
+
+ if (get_user(idx, &user_desc->entry_number))
+ return -EFAULT;
+
+ ret = get_tls_entry(current, &info, idx);
+ if (ret < 0)
+ goto out;
+
+ if (copy_to_user(user_desc, &info, sizeof(info)))
+ ret = -EFAULT;
+
+out:
+ return ret;
+}
+
+/*
+ * Perform get_thread_area on behalf of the traced child.
+ */
+int ptrace_get_thread_area(struct task_struct *child, int idx,
+ struct user_desc __user *user_desc)
+{
+ struct user_desc info;
+ int ret;
+
+ if (!host_supports_tls)
+ return -EIO;
+
+ ret = get_tls_entry(child, &info, idx);
+ if (ret < 0)
+ goto out;
+
+ if (copy_to_user(user_desc, &info, sizeof(info)))
+ ret = -EFAULT;
+out:
+ return ret;
+}
+
+
+/* XXX: This part is probably common to i386 and x86-64. Don't create a common
+ * file for now, do that when implementing x86-64 support.*/
+static int __init __setup_host_supports_tls(void) {
+ check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
+ if (host_supports_tls) {
+ printk(KERN_INFO "Host TLS support detected\n");
+ printk(KERN_INFO "Detected host type: ");
+ switch (host_gdt_entry_tls_min) {
+ case GDT_ENTRY_TLS_MIN_I386:
+ printk("i386\n");
+ break;
+ case GDT_ENTRY_TLS_MIN_X86_64:
+ printk("x86_64\n");
+ break;
+ }
+ } else
+ printk(KERN_ERR " Host TLS support NOT detected! "
+ "TLS support inside UML will not work\n");
+ return 1;
+}
+
+__initcall(__setup_host_supports_tls);
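
sys_set_thread_area() above implements the usual entry_number == -1 convention: the caller asks the kernel to pick a free TLS slot and reads the chosen index back from the same structure, which is what the put_user(idx, &user_desc->entry_number) is for. A hedged user-space sketch using the raw syscall (i386 syscall number 243 assumed; a threading library normally does this on your behalf):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>			/* struct user_desc */

#ifndef __NR_set_thread_area
#define __NR_set_thread_area 243	/* i386 syscall number */
#endif

static char tls_block[128];

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;		/* "pick a free slot for me" */
	desc.base_addr = (unsigned long)tls_block;
	desc.limit = sizeof(tls_block) - 1;
	desc.seg_32bit = 1;
	desc.useable = 1;

	if (syscall(__NR_set_thread_area, &desc) != 0) {
		perror("set_thread_area");	/* ENOSYS when the host lacks TLS */
		return 1;
	}

	/* The kernel wrote the slot it chose back into entry_number. */
	printf("TLS descriptor installed in GDT entry %u\n", desc.entry_number);
	return 0;
}
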
diff --git a/trunk/arch/um/sys-x86_64/Makefile b/trunk/arch/um/sys-x86_64/Makefile
index a351091fbd99..b5fc22babddf 100644
--- a/trunk/arch/um/sys-x86_64/Makefile
+++ b/trunk/arch/um/sys-x86_64/Makefile
@@ -4,31 +4,23 @@
# Licensed under the GPL
#
-#XXX: why into lib-y?
-lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o ldt.o mem.o memcpy.o \
- ptrace.o ptrace_user.o sigcontext.o signal.o syscalls.o \
- syscall_table.o sysrq.o thunk.o
-lib-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
+obj-y = bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
+ sigcontext.o signal.o syscalls.o syscall_table.o sysrq.o ksyms.o \
+ tls.o
-obj-y := ksyms.o
-obj-$(CONFIG_MODULES) += module.o um_module.o
+obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
+obj-$(CONFIG_MODULES) += um_module.o
-USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o
+subarch-obj-y = lib/bitops.o lib/csum-partial.o lib/memcpy.o lib/thunk.o
+subarch-obj-$(CONFIG_MODULES) += kernel/module.o
-SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c ldt.c memcpy.S \
- thunk.S module.c
+ldt-y = ../sys-i386/ldt.o
-include arch/um/scripts/Makefile.rules
+USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o
-bitops.c-dir = lib
-csum-copy.S-dir = lib
-csum-partial.c-dir = lib
-csum-wrappers.c-dir = lib
-ldt.c-dir = /arch/um/sys-i386
-memcpy.S-dir = lib
-thunk.S-dir = lib
-module.c-dir = kernel
+include arch/um/scripts/Makefile.rules
-$(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS))
+extra-$(CONFIG_MODE_TT) += unmap.o
-include arch/um/scripts/Makefile.unmap
+$(obj)/stub_segv.o $(obj)/unmap.o: \
+ _c_flags = $(call unprofile,$(CFLAGS))
diff --git a/trunk/arch/um/sys-x86_64/tls.c b/trunk/arch/um/sys-x86_64/tls.c
new file mode 100644
index 000000000000..ce1bf1b81c43
--- /dev/null
+++ b/trunk/arch/um/sys-x86_64/tls.c
@@ -0,0 +1,14 @@
+#include "linux/sched.h"
+
+void debug_arch_force_load_TLS(void)
+{
+}
+
+void clear_flushed_tls(struct task_struct *task)
+{
+}
+
+int arch_copy_tls(struct task_struct *t)
+{
+ return 0;
+}
diff --git a/trunk/arch/x86_64/ia32/vsyscall-sigreturn.S b/trunk/arch/x86_64/ia32/vsyscall-sigreturn.S
index d90321fe9bba..1384367cdbe1 100644
--- a/trunk/arch/x86_64/ia32/vsyscall-sigreturn.S
+++ b/trunk/arch/x86_64/ia32/vsyscall-sigreturn.S
@@ -32,9 +32,28 @@ __kernel_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.section .eh_frame,"a",@progbits
+.LSTARTFRAMES:
+ .long .LENDCIES-.LSTARTCIES
+.LSTARTCIES:
+ .long 0 /* CIE ID */
+ .byte 1 /* Version number */
+ .string "zRS" /* NUL-terminated augmentation string */
+ .uleb128 1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 8 /* Return address register column */
+ .uleb128 1 /* Augmentation value length */
+ .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+ .byte 0x0c /* DW_CFA_def_cfa */
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 /* DW_CFA_offset, column 0x8 */
+ .uleb128 1
+ .align 4
+.LENDCIES:
+
.long .LENDFDE2-.LSTARTFDE2 /* Length FDE */
.LSTARTFDE2:
- .long .LSTARTFDE2-.LSTARTFRAME /* CIE pointer */
+ .long .LSTARTFDE2-.LSTARTFRAMES /* CIE pointer */
/* HACK: The dwarf2 unwind routines will subtract 1 from the
return address to get an address in the middle of the
presumed call instruction. Since we didn't get here via
@@ -97,7 +116,7 @@ __kernel_rt_sigreturn:
.long .LENDFDE3-.LSTARTFDE3 /* Length FDE */
.LSTARTFDE3:
- .long .LSTARTFDE3-.LSTARTFRAME /* CIE pointer */
+ .long .LSTARTFDE3-.LSTARTFRAMES /* CIE pointer */
/* HACK: See above wrt unwind library assumptions. */
.long .LSTART_rt_sigreturn-1-. /* PC-relative start address */
.long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
diff --git a/trunk/arch/x86_64/kernel/apic.c b/trunk/arch/x86_64/kernel/apic.c
index d54620147e8e..100a30c40044 100644
--- a/trunk/arch/x86_64/kernel/apic.c
+++ b/trunk/arch/x86_64/kernel/apic.c
@@ -615,7 +615,7 @@ static int __init apic_set_verbosity(char *str)
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug", str);
- return 0;
+ return 1;
}
__setup("apic=", apic_set_verbosity);
@@ -1137,35 +1137,35 @@ int __init APIC_init_uniprocessor (void)
static __init int setup_disableapic(char *str)
{
disable_apic = 1;
- return 0;
+ return 1;
}
static __init int setup_nolapic(char *str)
{
disable_apic = 1;
- return 0;
+ return 1;
}
static __init int setup_noapictimer(char *str)
{
if (str[0] != ' ' && str[0] != 0)
- return -1;
+ return 0;
disable_apic_timer = 1;
- return 0;
+ return 1;
}
static __init int setup_apicmaintimer(char *str)
{
apic_runs_main_timer = 1;
nohpet = 1;
- return 0;
+ return 1;
}
__setup("apicmaintimer", setup_apicmaintimer);
static __init int setup_noapicmaintimer(char *str)
{
apic_runs_main_timer = -1;
- return 0;
+ return 1;
}
__setup("noapicmaintimer", setup_noapicmaintimer);
diff --git a/trunk/arch/x86_64/kernel/early_printk.c b/trunk/arch/x86_64/kernel/early_printk.c
index 13af920b6594..b93ef5b51980 100644
--- a/trunk/arch/x86_64/kernel/early_printk.c
+++ b/trunk/arch/x86_64/kernel/early_printk.c
@@ -221,7 +221,7 @@ int __init setup_early_printk(char *opt)
char buf[256];
if (early_console_initialized)
- return -1;
+ return 1;
strlcpy(buf,opt,sizeof(buf));
space = strchr(buf, ' ');
diff --git a/trunk/arch/x86_64/kernel/mce.c b/trunk/arch/x86_64/kernel/mce.c
index 04282ef9fbd4..10b3e348fc99 100644
--- a/trunk/arch/x86_64/kernel/mce.c
+++ b/trunk/arch/x86_64/kernel/mce.c
@@ -501,7 +501,7 @@ static struct miscdevice mce_log_device = {
static int __init mcheck_disable(char *str)
{
mce_dont_init = 1;
- return 0;
+ return 1;
}
/* mce=off disables machine check. Note you can reenable it later
@@ -521,7 +521,7 @@ static int __init mcheck_enable(char *str)
get_option(&str, &tolerant);
else
printk("mce= argument %s ignored. Please use /sys", str);
- return 0;
+ return 1;
}
__setup("nomce", mcheck_disable);
diff --git a/trunk/arch/x86_64/kernel/pmtimer.c b/trunk/arch/x86_64/kernel/pmtimer.c
index ee5ee4891f3d..b0444a415bd6 100644
--- a/trunk/arch/x86_64/kernel/pmtimer.c
+++ b/trunk/arch/x86_64/kernel/pmtimer.c
@@ -121,7 +121,7 @@ unsigned int do_gettimeoffset_pm(void)
static int __init nopmtimer_setup(char *s)
{
pmtmr_ioport = 0;
- return 0;
+ return 1;
}
__setup("nopmtimer", nopmtimer_setup);
diff --git a/trunk/arch/x86_64/kernel/setup.c b/trunk/arch/x86_64/kernel/setup.c
index d1f3e9272c05..0856ad444f90 100644
--- a/trunk/arch/x86_64/kernel/setup.c
+++ b/trunk/arch/x86_64/kernel/setup.c
@@ -540,7 +540,7 @@ void __init alternative_instructions(void)
static int __init noreplacement_setup(char *s)
{
no_replacement = 1;
- return 0;
+ return 1;
}
__setup("noreplacement", noreplacement_setup);
diff --git a/trunk/arch/x86_64/kernel/setup64.c b/trunk/arch/x86_64/kernel/setup64.c
index eabdb63fec31..8a691fa6d393 100644
--- a/trunk/arch/x86_64/kernel/setup64.c
+++ b/trunk/arch/x86_64/kernel/setup64.c
@@ -55,7 +55,7 @@ int __init nonx_setup(char *str)
do_not_nx = 1;
__supported_pte_mask &= ~_PAGE_NX;
}
- return 0;
+ return 1;
}
__setup("noexec=", nonx_setup); /* parsed early actually */
@@ -74,7 +74,7 @@ static int __init nonx32_setup(char *str)
force_personality32 &= ~READ_IMPLIES_EXEC;
else if (!strcmp(str, "off"))
force_personality32 |= READ_IMPLIES_EXEC;
- return 0;
+ return 1;
}
__setup("noexec32=", nonx32_setup);
diff --git a/trunk/arch/x86_64/kernel/smpboot.c b/trunk/arch/x86_64/kernel/smpboot.c
index ea48fa638070..71a7222cf9ce 100644
--- a/trunk/arch/x86_64/kernel/smpboot.c
+++ b/trunk/arch/x86_64/kernel/smpboot.c
@@ -353,7 +353,7 @@ static void __cpuinit tsc_sync_wait(void)
static __init int notscsync_setup(char *s)
{
notscsync = 1;
- return 0;
+ return 1;
}
__setup("notscsync", notscsync_setup);
diff --git a/trunk/arch/x86_64/kernel/time.c b/trunk/arch/x86_64/kernel/time.c
index 473b514b66e4..ef8bc46dc140 100644
--- a/trunk/arch/x86_64/kernel/time.c
+++ b/trunk/arch/x86_64/kernel/time.c
@@ -1306,7 +1306,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static int __init nohpet_setup(char *s)
{
nohpet = 1;
- return 0;
+ return 1;
}
__setup("nohpet", nohpet_setup);
@@ -1314,7 +1314,7 @@ __setup("nohpet", nohpet_setup);
int __init notsc_setup(char *s)
{
notsc = 1;
- return 0;
+ return 1;
}
__setup("notsc", notsc_setup);
diff --git a/trunk/arch/x86_64/kernel/traps.c b/trunk/arch/x86_64/kernel/traps.c
index edaa9fe654dc..6bda322d3caf 100644
--- a/trunk/arch/x86_64/kernel/traps.c
+++ b/trunk/arch/x86_64/kernel/traps.c
@@ -973,14 +973,14 @@ void __init trap_init(void)
static int __init oops_dummy(char *s)
{
panic_on_oops = 1;
- return -1;
+ return 1;
}
__setup("oops=", oops_dummy);
static int __init kstack_setup(char *s)
{
kstack_depth_to_print = simple_strtoul(s,NULL,0);
- return 0;
+ return 1;
}
__setup("kstack=", kstack_setup);
diff --git a/trunk/arch/x86_64/kernel/x8664_ksyms.c b/trunk/arch/x86_64/kernel/x8664_ksyms.c
index d96a9348e5a2..d78f46056bda 100644
--- a/trunk/arch/x86_64/kernel/x8664_ksyms.c
+++ b/trunk/arch/x86_64/kernel/x8664_ksyms.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(cpu_callout_map);
EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(get_wchan);
-
EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL_GPL(set_nmi_callback);
diff --git a/trunk/arch/x86_64/mm/fault.c b/trunk/arch/x86_64/mm/fault.c
index 316c53de47bd..55250593d8c9 100644
--- a/trunk/arch/x86_64/mm/fault.c
+++ b/trunk/arch/x86_64/mm/fault.c
@@ -623,6 +623,6 @@ void vmalloc_sync_all(void)
static int __init enable_pagefaulttrace(char *str)
{
page_fault_trace = 1;
- return 0;
+ return 1;
}
__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/trunk/arch/xtensa/kernel/xtensa_ksyms.c b/trunk/arch/xtensa/kernel/xtensa_ksyms.c
index efae56a51475..152b9370789b 100644
--- a/trunk/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/trunk/arch/xtensa/kernel/xtensa_ksyms.c
@@ -113,8 +113,6 @@ EXPORT_SYMBOL(__xtensa_copy_user);
// FIXME EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(get_wchan);
-
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
diff --git a/trunk/block/Kconfig b/trunk/block/Kconfig
index 5536839886ff..b6f5f0a79655 100644
--- a/trunk/block/Kconfig
+++ b/trunk/block/Kconfig
@@ -27,10 +27,10 @@ config BLK_DEV_IO_TRACE
config LSF
bool "Support for Large Single Files"
depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
- default n
help
- When CONFIG_LBD is disabled, say Y here if you want to
- handle large file(bigger than 2TB), otherwise say N.
- When CONFIG_LBD is enabled, Y is set automatically.
+ Say Y here if you want to be able to handle very large files (bigger
+ than 2TB), otherwise say N.
+
+ If unsure, say Y.
source block/Kconfig.iosched
diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index 56c2ed06a9e2..0d6be03d929e 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -145,7 +145,7 @@ static int __init elevator_setup(char *str)
strcpy(chosen_elevator, "anticipatory");
else
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
- return 0;
+ return 1;
}
__setup("elevator=", elevator_setup);
diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c
index db4c60c802d6..5a8d3bf02f17 100644
--- a/trunk/block/genhd.c
+++ b/trunk/block/genhd.c
@@ -17,8 +17,6 @@
#include
#include
-#define MAX_PROBE_HASH 255 /* random */
-
static struct subsystem block_subsys;
static DEFINE_MUTEX(block_subsys_lock);
@@ -31,108 +29,29 @@ static struct blk_major_name {
struct blk_major_name *next;
int major;
char name[16];
-} *major_names[MAX_PROBE_HASH];
+} *major_names[BLKDEV_MAJOR_HASH_SIZE];
/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(int major)
{
- return major % MAX_PROBE_HASH;
-}
-
-struct blkdev_info {
- int index;
- struct blk_major_name *bd;
-};
-
-/*
- * iterate over a list of blkdev_info structures. allows
- * the major_names array to be iterated over from outside this file
- * must be called with the block_subsys_lock held
- */
-void *get_next_blkdev(void *dev)
-{
- struct blkdev_info *info;
-
- if (dev == NULL) {
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
- info->index=0;
- info->bd = major_names[info->index];
- if (info->bd)
- goto out;
- } else {
- info = dev;
- }
-
- while (info->index < ARRAY_SIZE(major_names)) {
- if (info->bd)
- info->bd = info->bd->next;
- if (info->bd)
- goto out;
- /*
- * No devices on this chain, move to the next
- */
- info->index++;
- info->bd = (info->index < ARRAY_SIZE(major_names)) ?
- major_names[info->index] : NULL;
- if (info->bd)
- goto out;
- }
-
-out:
- return info;
-}
-
-void *acquire_blkdev_list(void)
-{
- mutex_lock(&block_subsys_lock);
- return get_next_blkdev(NULL);
-}
-
-void release_blkdev_list(void *dev)
-{
- mutex_unlock(&block_subsys_lock);
- kfree(dev);
+ return major % BLKDEV_MAJOR_HASH_SIZE;
}
+#ifdef CONFIG_PROC_FS
-/*
- * Count the number of records in the blkdev_list.
- * must be called with the block_subsys_lock held
- */
-int count_blkdev_list(void)
+void blkdev_show(struct seq_file *f, off_t offset)
{
- struct blk_major_name *n;
- int i, count;
+ struct blk_major_name *dp;
- count = 0;
-
- for (i = 0; i < ARRAY_SIZE(major_names); i++) {
- for (n = major_names[i]; n; n = n->next)
- count++;
+ if (offset < BLKDEV_MAJOR_HASH_SIZE) {
+ mutex_lock(&block_subsys_lock);
+ for (dp = major_names[offset]; dp; dp = dp->next)
+ seq_printf(f, "%3d %s\n", dp->major, dp->name);
+ mutex_unlock(&block_subsys_lock);
}
-
- return count;
-}
-
-/*
- * extract the major and name values from a blkdev_info struct
- * passed in as a void to *dev. Must be called with
- * block_subsys_lock held
- */
-int get_blkdev_info(void *dev, int *major, char **name)
-{
- struct blkdev_info *info = dev;
-
- if (info->bd == NULL)
- return 1;
-
- *major = info->bd->major;
- *name = info->bd->name;
- return 0;
}
+#endif /* CONFIG_PROC_FS */
int register_blkdev(unsigned int major, const char *name)
{
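
The new blkdev_show() helper is presumably meant to be driven from a seq_file iterator (the /proc/devices code that previously used the removed blkdev_list helpers). A minimal sketch of such a caller; the function name devinfo_show and the placement of the prototype are illustrative assumptions, not taken from the patch:

#include <linux/seq_file.h>

/* Prototype from the block layer; the header it lives in is assumed here. */
extern void blkdev_show(struct seq_file *f, off_t offset);

static int devinfo_show(struct seq_file *f, void *v)
{
	off_t offset = *(off_t *)v;	/* hash-bucket index from ->start()/->next() */

	if (offset == 0)
		seq_puts(f, "Block devices:\n");
	blkdev_show(f, offset);		/* one "%3d %s\n" line per major in this bucket */
	return 0;
}
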
diff --git a/trunk/drivers/Kconfig b/trunk/drivers/Kconfig
index 9f5c0da57c90..5c91d6afb117 100644
--- a/trunk/drivers/Kconfig
+++ b/trunk/drivers/Kconfig
@@ -64,6 +64,8 @@ source "drivers/usb/Kconfig"
source "drivers/mmc/Kconfig"
+source "drivers/leds/Kconfig"
+
source "drivers/infiniband/Kconfig"
source "drivers/sn/Kconfig"
diff --git a/trunk/drivers/Makefile b/trunk/drivers/Makefile
index 424955274e60..d6e8ffbd8132 100644
--- a/trunk/drivers/Makefile
+++ b/trunk/drivers/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
+obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
diff --git a/trunk/drivers/acpi/ec.c b/trunk/drivers/acpi/ec.c
index 79b09d76c180..eee0864ba300 100644
--- a/trunk/drivers/acpi/ec.c
+++ b/trunk/drivers/acpi/ec.c
@@ -1572,7 +1572,7 @@ static void __exit acpi_ec_exit(void)
static int __init acpi_fake_ecdt_setup(char *str)
{
acpi_fake_ecdt_enabled = 1;
- return 0;
+ return 1;
}
__setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
@@ -1591,7 +1591,7 @@ static int __init acpi_ec_set_intr_mode(char *str)
acpi_ec_driver.ops.add = acpi_ec_poll_add;
}
printk(KERN_INFO PREFIX "EC %s mode.\n", intr ? "interrupt" : "polling");
- return 0;
+ return 1;
}
__setup("ec_intr=", acpi_ec_set_intr_mode);
diff --git a/trunk/drivers/block/amiflop.c b/trunk/drivers/block/amiflop.c
index b6e290956214..2a8af685926f 100644
--- a/trunk/drivers/block/amiflop.c
+++ b/trunk/drivers/block/amiflop.c
@@ -1850,6 +1850,7 @@ static int __init amiga_floppy_setup (char *str)
return 0;
printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
fd_def_df0 = n;
+ return 1;
}
__setup("floppy=", amiga_floppy_setup);
diff --git a/trunk/drivers/char/drm/drmP.h b/trunk/drivers/char/drm/drmP.h
index 107df9fdba4e..edc72a6348a7 100644
--- a/trunk/drivers/char/drm/drmP.h
+++ b/trunk/drivers/char/drm/drmP.h
@@ -357,6 +357,12 @@ typedef struct drm_freelist {
spinlock_t lock;
} drm_freelist_t;
+typedef struct drm_dma_handle {
+ dma_addr_t busaddr;
+ void *vaddr;
+ size_t size;
+} drm_dma_handle_t;
+
/**
* Buffer entry. There is one of this for each buffer size order.
*/
@@ -366,7 +372,7 @@ typedef struct drm_buf_entry {
drm_buf_t *buflist; /**< buffer list */
int seg_count;
int page_order;
- unsigned long *seglist;
+ drm_dma_handle_t **seglist;
drm_freelist_t freelist;
} drm_buf_entry_t;
@@ -483,12 +489,6 @@ typedef struct drm_sigdata {
drm_hw_lock_t *lock;
} drm_sigdata_t;
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
/**
* Mappings list
*/
@@ -813,8 +813,6 @@ extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
-extern unsigned long drm_alloc_pages(int order, int area);
-extern void drm_free_pages(unsigned long address, int order, int area);
extern void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev);
extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
diff --git a/trunk/drivers/char/drm/drm_bufs.c b/trunk/drivers/char/drm/drm_bufs.c
index e2637b4d51de..8a9cf12e6183 100644
--- a/trunk/drivers/char/drm/drm_bufs.c
+++ b/trunk/drivers/char/drm/drm_bufs.c
@@ -474,8 +474,7 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
- drm_free_pages(entry->seglist[i],
- entry->page_order, DRM_MEM_DMA);
+ drm_pci_free(dev, entry->seglist[i]);
}
}
drm_free(entry->seglist,
@@ -678,7 +677,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int page_order;
drm_buf_entry_t *entry;
- unsigned long page;
+ drm_dma_handle_t *dmah;
drm_buf_t *buf;
int alignment;
unsigned long offset;
@@ -781,8 +780,10 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
page_count = 0;
while (entry->buf_count < count) {
- page = drm_alloc_pages(page_order, DRM_MEM_DMA);
- if (!page) {
+
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+ if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
@@ -794,13 +795,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- entry->seglist[entry->seg_count++] = page;
+ entry->seglist[entry->seg_count++] = dmah;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
- page + PAGE_SIZE * i);
+ (unsigned long)dmah->vaddr + PAGE_SIZE * i);
temp_pagelist[dma->page_count + page_count++]
- = page + PAGE_SIZE * i;
+ = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
@@ -811,7 +812,8 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(page + offset);
+ buf->address = (void *)(dmah->vaddr + offset);
+ buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
diff --git a/trunk/drivers/char/drm/drm_dma.c b/trunk/drivers/char/drm/drm_dma.c
index 2afab95ca036..892db7096986 100644
--- a/trunk/drivers/char/drm/drm_dma.c
+++ b/trunk/drivers/char/drm/drm_dma.c
@@ -85,9 +85,7 @@ void drm_dma_takedown(drm_device_t * dev)
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
- drm_free_pages(dma->bufs[i].seglist[j],
- dma->bufs[i].page_order,
- DRM_MEM_DMA);
+ drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
}
drm_free(dma->bufs[i].seglist,
diff --git a/trunk/drivers/char/drm/drm_memory.c b/trunk/drivers/char/drm/drm_memory.c
index 8074771e348f..dddf8de66143 100644
--- a/trunk/drivers/char/drm/drm_memory.c
+++ b/trunk/drivers/char/drm/drm_memory.c
@@ -79,65 +79,6 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
return pt;
}
-/**
- * Allocate pages.
- *
- * \param order size order.
- * \param area memory area. (Not used.)
- * \return page address on success, or zero on failure.
- *
- * Allocate and reserve free pages.
- */
-unsigned long drm_alloc_pages(int order, int area)
-{
- unsigned long address;
- unsigned long bytes = PAGE_SIZE << order;
- unsigned long addr;
- unsigned int sz;
-
- address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
- if (!address)
- return 0;
-
- /* Zero */
- memset((void *)address, 0, bytes);
-
- /* Reserve */
- for (addr = address, sz = bytes;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
- }
-
- return address;
-}
-
-/**
- * Free pages.
- *
- * \param address address of the pages to free.
- * \param order size order.
- * \param area memory area. (Not used.)
- *
- * Unreserve and free pages allocated by alloc_pages().
- */
-void drm_free_pages(unsigned long address, int order, int area)
-{
- unsigned long bytes = PAGE_SIZE << order;
- unsigned long addr;
- unsigned int sz;
-
- if (!address)
- return;
-
- /* Unreserve */
- for (addr = address, sz = bytes;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- }
-
- free_pages(address, order);
-}
-
#if __OS_HAS_AGP
/** Wrapper around agp_allocate_memory() */
DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
diff --git a/trunk/drivers/char/drm/drm_memory_debug.h b/trunk/drivers/char/drm/drm_memory_debug.h
index e84605fc54af..7868341817da 100644
--- a/trunk/drivers/char/drm/drm_memory_debug.h
+++ b/trunk/drivers/char/drm/drm_memory_debug.h
@@ -206,76 +206,6 @@ void drm_free (void *pt, size_t size, int area) {
}
}
-unsigned long drm_alloc_pages (int order, int area) {
- unsigned long address;
- unsigned long bytes = PAGE_SIZE << order;
- unsigned long addr;
- unsigned int sz;
-
- spin_lock(&drm_mem_lock);
- if ((drm_ram_used >> PAGE_SHIFT)
- > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
- spin_unlock(&drm_mem_lock);
- return 0;
- }
- spin_unlock(&drm_mem_lock);
-
- address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
- if (!address) {
- spin_lock(&drm_mem_lock);
- ++drm_mem_stats[area].fail_count;
- spin_unlock(&drm_mem_lock);
- return 0;
- }
- spin_lock(&drm_mem_lock);
- ++drm_mem_stats[area].succeed_count;
- drm_mem_stats[area].bytes_allocated += bytes;
- drm_ram_used += bytes;
- spin_unlock(&drm_mem_lock);
-
- /* Zero outside the lock */
- memset((void *)address, 0, bytes);
-
- /* Reserve */
- for (addr = address, sz = bytes;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page(addr));
- }
-
- return address;
-}
-
-void drm_free_pages (unsigned long address, int order, int area) {
- unsigned long bytes = PAGE_SIZE << order;
- int alloc_count;
- int free_count;
- unsigned long addr;
- unsigned int sz;
-
- if (!address) {
- DRM_MEM_ERROR(area, "Attempt to free address 0\n");
- } else {
- /* Unreserve */
- for (addr = address, sz = bytes;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- }
- free_pages(address, order);
- }
-
- spin_lock(&drm_mem_lock);
- free_count = ++drm_mem_stats[area].free_count;
- alloc_count = drm_mem_stats[area].succeed_count;
- drm_mem_stats[area].bytes_freed += bytes;
- drm_ram_used -= bytes;
- spin_unlock(&drm_mem_lock);
- if (free_count > alloc_count) {
- DRM_MEM_ERROR(area,
- "Excess frees: %d frees, %d allocs\n",
- free_count, alloc_count);
- }
-}
-
void *drm_ioremap (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
diff --git a/trunk/drivers/char/drm/drm_pci.c b/trunk/drivers/char/drm/drm_pci.c
index 1fd7ff164817..b28ca9cea8a2 100644
--- a/trunk/drivers/char/drm/drm_pci.c
+++ b/trunk/drivers/char/drm/drm_pci.c
@@ -50,6 +50,10 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
+#if 1
+ unsigned long addr;
+ size_t sz;
+#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;
@@ -79,7 +83,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
return NULL;
dmah->size = size;
- dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
#ifdef DRM_DEBUG_MEMORY
if (dmah->vaddr == NULL) {
@@ -104,18 +108,29 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
memset(dmah->vaddr, 0, size);
+ /* XXX - Is virt_to_page() legal for consistent mem? */
+ /* Reserve */
+ for (addr = (unsigned long)dmah->vaddr, sz = size;
+ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ SetPageReserved(virt_to_page(addr));
+ }
+
return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
/**
- * \brief Free a PCI consistent memory block with freeing its descriptor.
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
{
+#if 1
+ unsigned long addr;
+ size_t sz;
+#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;
int alloc_count;
@@ -127,8 +142,14 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
#endif
} else {
- pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
- dmah->busaddr);
+ /* XXX - Is virt_to_page() legal for consistent mem? */
+ /* Unreserve */
+ for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ }
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
}
#ifdef DRM_DEBUG_MEMORY
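
The net effect of the drm_memory.c and drm_pci.c changes is that callers now receive a drm_dma_handle_t carrying both the kernel virtual address and the bus address, instead of a bare page address. A rough sketch of the intended call pattern, with a hypothetical helper name and error handling trimmed:

#include "drmP.h"

/* Illustrative only; not code from the patch. */
static int example_alloc_dma_block(drm_device_t *dev)
{
	drm_dma_handle_t *dmah;

	/* One 4 KB, 4 KB-aligned DMA-coherent block below 4 GB. */
	dmah = drm_pci_alloc(dev, PAGE_SIZE, 0x1000, 0xfffffffful);
	if (!dmah)
		return -ENOMEM;

	memset(dmah->vaddr, 0, dmah->size);	/* CPU access via the kernel mapping */
	/* ... program the device with dmah->busaddr ... */

	drm_pci_free(dev, dmah);		/* releases the block and the handle */
	return 0;
}
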
diff --git a/trunk/drivers/char/drm/drm_pciids.h b/trunk/drivers/char/drm/drm_pciids.h
index 2c17e88a8847..b1bb3c7b568d 100644
--- a/trunk/drivers/char/drm/drm_pciids.h
+++ b/trunk/drivers/char/drm/drm_pciids.h
@@ -3,49 +3,69 @@
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
- {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350},\
+ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP}, \
{0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \
{0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
- {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS250|CHIP_IS_IGP}, \
+ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS250|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
- {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
- {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
- {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
- {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
- {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
{0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
- {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
- {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
- {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -53,44 +73,66 @@
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
{0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
{0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
- {0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
- {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
- {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
- {0x1002, 0x5837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
- {0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
- {0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
- {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
- {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
- {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
- {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
- {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
- {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+ {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP}, \
+ {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/trunk/drivers/char/drm/i915_dma.c b/trunk/drivers/char/drm/i915_dma.c
index 1ff4c7ca0bff..9f4b8ce4c05e 100644
--- a/trunk/drivers/char/drm/i915_dma.c
+++ b/trunk/drivers/char/drm/i915_dma.c
@@ -495,8 +495,6 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
}
}
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
-
i915_emit_breadcrumb(dev);
return 0;
diff --git a/trunk/drivers/char/drm/i915_irq.c b/trunk/drivers/char/drm/i915_irq.c
index d3879ac9970f..a752afd86ab8 100644
--- a/trunk/drivers/char/drm/i915_irq.c
+++ b/trunk/drivers/char/drm/i915_irq.c
@@ -53,6 +53,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
if (temp & USER_INT_FLAG)
DRM_WAKEUP(&dev_priv->irq_queue);
diff --git a/trunk/drivers/char/drm/r300_cmdbuf.c b/trunk/drivers/char/drm/r300_cmdbuf.c
index c08fa5076f05..b108c7f913b2 100644
--- a/trunk/drivers/char/drm/r300_cmdbuf.c
+++ b/trunk/drivers/char/drm/r300_cmdbuf.c
@@ -214,13 +214,13 @@ void r300_init_reg_flags(void)
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_TX_FILTER_0, 16);
- ADD_RANGE(R300_TX_UNK1_0, 16);
+ ADD_RANGE(R300_TX_FILTER1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
ADD_RANGE(R300_TX_PITCH_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
- ADD_RANGE(R300_TX_UNK4_0, 16);
+ ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
@@ -242,8 +242,10 @@ static __inline__ int r300_check_range(unsigned reg, int count)
return 0;
}
- /* we expect offsets passed to the framebuffer to be either within video memory or
- within AGP space */
+/*
+ * we expect offsets passed to the framebuffer to be either within video
+ * memory or within AGP space
+ */
static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
u32 offset)
{
@@ -251,11 +253,11 @@ static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
but this value is not being kept.
This code is correct for now (does the same thing as the
code that sets MC_FB_LOCATION) in radeon_cp.c */
- if ((offset >= dev_priv->fb_location) &&
- (offset < dev_priv->gart_vm_start))
+ if (offset >= dev_priv->fb_location &&
+ offset < (dev_priv->fb_location + dev_priv->fb_size))
return 0;
- if ((offset >= dev_priv->gart_vm_start) &&
- (offset < dev_priv->gart_vm_start + dev_priv->gart_size))
+ if (offset >= dev_priv->gart_vm_start &&
+ offset < (dev_priv->gart_vm_start + dev_priv->gart_size))
return 0;
return 1;
}
@@ -490,6 +492,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
return 0;
}
+
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
@@ -701,6 +704,64 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
buf->used = 0;
}
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_r300_cmd_header_t header)
+{
+ u32 *ref_age_base;
+ u32 i, buf_idx, h_pending;
+ RING_LOCALS;
+
+ if (cmdbuf->bufsz <
+ (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+ return DRM_ERR(EINVAL);
+ }
+
+ if (header.scratch.reg >= 5) {
+ return DRM_ERR(EINVAL);
+ }
+
+ dev_priv->scratch_ages[header.scratch.reg]++;
+
+ ref_age_base = *(u32 **)cmdbuf->buf;
+
+ cmdbuf->buf += sizeof(u64);
+ cmdbuf->bufsz -= sizeof(u64);
+
+ for (i=0; i < header.scratch.n_bufs; i++) {
+ buf_idx = *(u32 *)cmdbuf->buf;
+ buf_idx *= 2; /* 8 bytes per buf */
+
+ if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+ return DRM_ERR(EINVAL);
+ }
+
+ if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+ return DRM_ERR(EINVAL);
+ }
+
+ if (h_pending == 0) {
+ return DRM_ERR(EINVAL);
+ }
+
+ h_pending--;
+
+ if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+ return DRM_ERR(EINVAL);
+ }
+
+ cmdbuf->buf += sizeof(buf_idx);
+ cmdbuf->bufsz -= sizeof(buf_idx);
+ }
+
+ BEGIN_RING(2);
+ OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
+ OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+ ADVANCE_RING();
+
+ return 0;
+}
+
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
@@ -838,6 +899,15 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
}
break;
+ case R300_CMD_SCRATCH:
+ DRM_DEBUG("R300_CMD_SCRATCH\n");
+ ret = r300_scratch(dev_priv, cmdbuf, header);
+ if (ret) {
+ DRM_ERROR("r300_scratch failed\n");
+ goto cleanup;
+ }
+ break;
+
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
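
The scratch packet added above implies a particular userspace buffer layout: the command payload begins with a 64-bit user pointer to an array of 8-byte records (one per buffer), followed by one 32-bit buffer index per record; buf_idx is doubled because each record is two u32 words. Under that reading, a single record looks roughly like this (the struct name and field names are illustrative, not taken from the DDX):

#include <linux/types.h>

struct r300_scratch_ref {
	u32 age;	/* overwritten with dev_priv->scratch_ages[reg] */
	u32 pending;	/* read back, must be non-zero, then decremented */
};
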
diff --git a/trunk/drivers/char/drm/r300_reg.h b/trunk/drivers/char/drm/r300_reg.h
index d1e19954406b..a881f96c983e 100644
--- a/trunk/drivers/char/drm/r300_reg.h
+++ b/trunk/drivers/char/drm/r300_reg.h
@@ -711,8 +711,22 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
# define R300_TX_MAX_ANISO_MASK (14 << 21)
-#define R300_TX_UNK1_0 0x4440
+#define R300_TX_FILTER1_0 0x4440
+# define R300_CHROMA_KEY_MODE_DISABLE 0
+# define R300_CHROMA_KEY_FORCE 1
+# define R300_CHROMA_KEY_BLEND 2
+# define R300_MC_ROUND_NORMAL (0<<2)
+# define R300_MC_ROUND_MPEG4 (1<<2)
# define R300_LOD_BIAS_MASK 0x1fff
+# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
+# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
+# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
+# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
+# define R300_TX_TRI_PERF_0_8 (0<<15)
+# define R300_TX_TRI_PERF_1_8 (1<<15)
+# define R300_TX_TRI_PERF_1_4 (2<<15)
+# define R300_TX_TRI_PERF_3_8 (3<<15)
+# define R300_ANISO_THRESHOLD_MASK (7<<17)
#define R300_TX_SIZE_0 0x4480
# define R300_TX_WIDTHMASK_SHIFT 0
@@ -722,6 +736,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_UNK23 (1 << 23)
# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
# define R300_TX_SIZE_MASK (15 << 26)
+# define R300_TX_SIZE_PROJECTED (1<<30)
+# define R300_TX_SIZE_TXPITCH_EN (1<<31)
#define R300_TX_FORMAT_0 0x44C0
/* The interpretation of the format word by Wladimir van der Laan */
/* The X, Y, Z and W refer to the layout of the components.
@@ -750,7 +766,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
-# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
+# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
+# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
/* gap */
/* Floating point formats */
@@ -800,18 +817,20 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_YUV_MODE 0x00800000
-#define R300_TX_PITCH_0 0x4500
+#define R300_TX_PITCH_0 0x4500 /* obvious missing in gap */
#define R300_TX_OFFSET_0 0x4540
/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define R300_TXO_MACRO_TILE (1 << 2)
+# define R300_TXO_MICRO_TILE (1 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END */
-#define R300_TX_UNK4_0 0x4580
-#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
+#define R300_TX_CHROMA_KEY_0 0x4580 /* 32 bit chroma key */
+#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
/* END */
@@ -868,7 +887,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
-# define R300_PFS_NODE_LAST_NODE (1 << 22)
+/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */
+# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
+# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
/* TEX
// As far as I can tell, texture instructions cannot write into output
@@ -887,6 +908,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
*/
# define R300_FPITX_OPCODE_SHIFT 15
# define R300_FPITX_OP_TEX 1
+# define R300_FPITX_OP_KIL 2
# define R300_FPITX_OP_TXP 3
# define R300_FPITX_OP_TXB 4
@@ -962,9 +984,11 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI1_SRC2C_CONST (1 << 17)
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
+# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
# define R300_FPI1_DSTC_REG_X (1 << 23)
# define R300_FPI1_DSTC_REG_Y (1 << 24)
# define R300_FPI1_DSTC_REG_Z (1 << 25)
+# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
@@ -983,6 +1007,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
# define R300_FPI3_DSTA_OUTPUT (1 << 24)
+# define R300_FPI3_DSTA_DEPTH (1 << 27)
#define R300_PFS_INSTR0_0 0x48C0
# define R300_FPI0_ARGC_SRC0C_XYZ 0
@@ -1036,7 +1061,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
# define R300_FPI0_OUTC_SAT (1 << 30)
-# define R300_FPI0_UNKNOWN_31 (1 << 31)
+# define R300_FPI0_INSERT_NOP (1 << 31)
#define R300_PFS_INSTR2_0 0x49C0
# define R300_FPI2_ARGA_SRC0C_X 0
diff --git a/trunk/drivers/char/drm/radeon_cp.c b/trunk/drivers/char/drm/radeon_cp.c
index 9bb8ae0c1c27..7f949c9c9691 100644
--- a/trunk/drivers/char/drm/radeon_cp.c
+++ b/trunk/drivers/char/drm/radeon_cp.c
@@ -1118,14 +1118,20 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
{
u32 ring_start, cur_read_ptr;
u32 tmp;
-
- /* Initialize the memory controller */
- RADEON_WRITE(RADEON_MC_FB_LOCATION,
- ((dev_priv->gart_vm_start - 1) & 0xffff0000)
- | (dev_priv->fb_location >> 16));
+
+ /* Initialize the memory controller. With the new memory map, the fb
+ * location is not changed; it should already have been properly
+ * initialized. Part of the problem is that the code below is bogus:
+ * it assumes the GART is always appended to the fb, which is not
+ * necessarily the case.
+ */
+ if (!dev_priv->new_memmap)
+ RADEON_WRITE(RADEON_MC_FB_LOCATION,
+ ((dev_priv->gart_vm_start - 1) & 0xffff0000)
+ | (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
+ RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
@@ -1153,8 +1159,6 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP) {
- /* set RADEON_AGP_BASE here instead of relying on X from user space */
- RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
dev_priv->ring_rptr->offset
- dev->agp->base + dev_priv->gart_vm_start);
@@ -1174,6 +1178,17 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
entry->handle + tmp_ofs);
}
+ /* Set ring buffer size */
+#ifdef __BIG_ENDIAN
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
+#else
+ RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
+#endif
+
+ /* Start by assuming that writeback doesn't work */
+ dev_priv->writeback_works = 0;
+
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
* whenever they are updated.
@@ -1190,28 +1205,9 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
- /* Writeback doesn't seem to work everywhere, test it first */
- DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
- RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
-
- for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
- if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
- 0xdeadbeef)
- break;
- DRM_UDELAY(1);
- }
-
- if (tmp < dev_priv->usec_timeout) {
- dev_priv->writeback_works = 1;
- DRM_DEBUG("writeback test succeeded, tmp=%d\n", tmp);
- } else {
- dev_priv->writeback_works = 0;
- DRM_DEBUG("writeback test failed\n");
- }
- if (radeon_no_wb == 1) {
- dev_priv->writeback_works = 0;
- DRM_DEBUG("writeback forced off\n");
- }
+ /* Turn on bus mastering */
+ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ RADEON_WRITE(RADEON_BUS_CNTL, tmp);
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
@@ -1223,26 +1219,45 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
- /* Set ring buffer size */
-#ifdef __BIG_ENDIAN
- RADEON_WRITE(RADEON_CP_RB_CNTL,
- dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
-#else
- RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
-#endif
-
radeon_do_wait_for_idle(dev_priv);
- /* Turn on bus mastering */
- tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- RADEON_WRITE(RADEON_BUS_CNTL, tmp);
-
/* Sync everything up */
RADEON_WRITE(RADEON_ISYNC_CNTL,
(RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+
+}
+
+static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
+{
+ u32 tmp;
+
+ /* Writeback doesn't seem to work everywhere; test it here and enable
+ * it only if it appears to work.
+ */
+ DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
+ RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
+ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+ if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
+ 0xdeadbeef)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (tmp < dev_priv->usec_timeout) {
+ dev_priv->writeback_works = 1;
+ DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+ } else {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback test failed\n");
+ }
+ if (radeon_no_wb == 1) {
+ dev_priv->writeback_works = 0;
+ DRM_INFO("writeback forced off\n");
+ }
}
/* Enable or disable PCI-E GART on the chip */
@@ -1317,6 +1332,14 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_DEBUG("\n");
+ /* if we require new memory map but we don't have it fail */
+ if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap)
+ {
+ DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX\n");
+ radeon_do_cleanup_cp(dev);
+ return DRM_ERR(EINVAL);
+ }
+
if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
{
DRM_DEBUG("Forcing AGP card to PCI mode\n");
@@ -1496,6 +1519,9 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
& 0xffff) << 16;
+ dev_priv->fb_size =
+ ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
+ - dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
((dev_priv->front_offset
@@ -1510,8 +1536,46 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+ dev_priv->fb_location) >> 10));
dev_priv->gart_size = init->gart_size;
- dev_priv->gart_vm_start = dev_priv->fb_location
- + RADEON_READ(RADEON_CONFIG_APER_SIZE);
+
+ /* Now let's set the memory map ... */
+ if (dev_priv->new_memmap) {
+ u32 base = 0;
+
+ DRM_INFO("Setting GART location based on new memory map\n");
+
+ /* If using AGP, try to locate the AGP aperture at the same
+ * location in the card and on the bus, though we have to
+ * align it down.
+ */
+#if __OS_HAS_AGP
+ if (dev_priv->flags & CHIP_IS_AGP) {
+ base = dev->agp->base;
+ /* Check if valid */
+ if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
+ base < (dev_priv->fb_location + dev_priv->fb_size)) {
+ DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+ dev->agp->base);
+ base = 0;
+ }
+ }
+#endif
+ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+ if (base == 0) {
+ base = dev_priv->fb_location + dev_priv->fb_size;
+ if (((base + dev_priv->gart_size) & 0xfffffffful)
+ < base)
+ base = dev_priv->fb_location
+ - dev_priv->gart_size;
+ }
+ dev_priv->gart_vm_start = base & 0xffc00000u;
+ if (dev_priv->gart_vm_start != base)
+ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+ base, dev_priv->gart_vm_start);
+ } else {
+ DRM_INFO("Setting GART location based on old memory map\n");
+ dev_priv->gart_vm_start = dev_priv->fb_location +
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
+ }
#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP)
@@ -1596,6 +1660,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->last_buf = 0;
radeon_do_engine_reset(dev);
+ radeon_test_writeback(dev_priv);
return 0;
}
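
The new-memmap branch above boils down to: prefer the AGP aperture's bus address unless it overlaps the framebuffer, otherwise place the GART directly after the framebuffer (or before it if appending would wrap past 4 GB), and finally align the result down to a 4 MB boundary. A condensed restatement of that decision as a standalone helper, with the dev_priv fields treated as plain integers; it is a sketch, not a drop-in replacement:

#include <linux/types.h>

static u32 pick_gart_base(u32 fb_location, u32 fb_size, u32 gart_size,
			  u32 agp_base /* 0 if AGP is absent or unusable */)
{
	u32 base = 0;

	/* Use the AGP aperture address unless it would overlap the fb. */
	if (agp_base &&
	    !(agp_base + gart_size > fb_location &&
	      agp_base < fb_location + fb_size))
		base = agp_base;

	if (base == 0) {
		base = fb_location + fb_size;		/* append to the fb ... */
		if (((base + gart_size) & 0xfffffffful) < base)
			base = fb_location - gart_size;	/* ... unless that wraps */
	}

	return base & 0xffc00000u;			/* align down to 4 MB */
}
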
diff --git a/trunk/drivers/char/drm/radeon_drm.h b/trunk/drivers/char/drm/radeon_drm.h
index 9c177a6b2a4c..c8e279e89c2e 100644
--- a/trunk/drivers/char/drm/radeon_drm.h
+++ b/trunk/drivers/char/drm/radeon_drm.h
@@ -222,6 +222,7 @@ typedef union {
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
+#define R300_CMD_SCRATCH 8
typedef union {
unsigned int u;
@@ -247,6 +248,9 @@ typedef union {
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
+ struct {
+ unsigned char cmd_type, reg, n_bufs, flags;
+ } scratch;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1
@@ -697,6 +701,7 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
+#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
/* 1.14: Clients can allocate/free a surface
*/
diff --git a/trunk/drivers/char/drm/radeon_drv.h b/trunk/drivers/char/drm/radeon_drv.h
index 1f7d2ab8c4fc..78345cee8f8e 100644
--- a/trunk/drivers/char/drm/radeon_drv.h
+++ b/trunk/drivers/char/drm/radeon_drv.h
@@ -38,7 +38,7 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20051229"
+#define DRIVER_DATE "20060225"
/* Interface history:
*
@@ -91,9 +91,11 @@
* 1.20- Add support for r300 texrect
* 1.21- Add support for card type getparam
* 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 22
+#define DRIVER_MINOR 24
#define DRIVER_PATCHLEVEL 0
/*
@@ -101,20 +103,21 @@
*/
enum radeon_family {
CHIP_R100,
- CHIP_RS100,
CHIP_RV100,
+ CHIP_RS100,
CHIP_RV200,
- CHIP_R200,
CHIP_RS200,
- CHIP_R250,
- CHIP_RS250,
+ CHIP_R200,
CHIP_RV250,
+ CHIP_RS300,
CHIP_RV280,
CHIP_R300,
- CHIP_RS300,
CHIP_R350,
CHIP_RV350,
+ CHIP_RV380,
CHIP_R420,
+ CHIP_RV410,
+ CHIP_RS400,
CHIP_LAST,
};
@@ -136,9 +139,11 @@ enum radeon_chip_flags {
CHIP_IS_AGP = 0x00080000UL,
CHIP_HAS_HIERZ = 0x00100000UL,
CHIP_IS_PCIE = 0x00200000UL,
+ CHIP_NEW_MEMMAP = 0x00400000UL,
};
-#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
+#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
+ DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
typedef struct drm_radeon_freelist {
@@ -199,6 +204,8 @@ typedef struct drm_radeon_private {
drm_radeon_sarea_t *sarea_priv;
u32 fb_location;
+ u32 fb_size;
+ int new_memmap;
int gart_size;
u32 gart_vm_start;
@@ -272,6 +279,8 @@ typedef struct drm_radeon_private {
unsigned long pcigart_offset;
drm_ati_pcigart_info gart_info;
+ u32 scratch_ages[5];
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
} drm_radeon_private_t;
diff --git a/trunk/drivers/char/drm/radeon_state.c b/trunk/drivers/char/drm/radeon_state.c
index 7bc27516d425..c5b8f774a599 100644
--- a/trunk/drivers/char/drm/radeon_state.c
+++ b/trunk/drivers/char/drm/radeon_state.c
@@ -45,22 +45,53 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
u32 off = *offset;
struct drm_radeon_driver_file_fields *radeon_priv;
- if (off >= dev_priv->fb_location &&
- off < (dev_priv->gart_vm_start + dev_priv->gart_size))
- return 0;
-
- radeon_priv = filp_priv->driver_priv;
- off += radeon_priv->radeon_fb_delta;
+ /* Hrm ... the story of the offset ... So this function converts
+ * the various ideas of what userland clients might have for an
+ * offset in the card address space into an offset into the card
+ * address space :) So with a sane client, it should just keep
+ * the value intact and just do some boundary checking. However,
+ * not all clients are sane. Some older clients pass us 0-based
+ * offsets relative to the start of the framebuffer, and some may
+ * assume the AGP aperture is appended to the framebuffer, so we
+ * try to detect those cases and fix them up.
+ *
+ * Note: It might be a good idea here to make sure the offset lands
+ * in some "allowed" area to protect things like the PCIE GART...
+ */
- DRM_DEBUG("offset fixed up to 0x%x\n", off);
+ /* First, the best case, the offset already lands in either the
+ * framebuffer or the GART mapped space
+ */
+ if ((off >= dev_priv->fb_location &&
+ off < (dev_priv->fb_location + dev_priv->fb_size)) ||
+ (off >= dev_priv->gart_vm_start &&
+ off < (dev_priv->gart_vm_start + dev_priv->gart_size)))
+ return 0;
- if (off < dev_priv->fb_location ||
- off >= (dev_priv->gart_vm_start + dev_priv->gart_size))
- return DRM_ERR(EINVAL);
+ /* Ok, that didn't happen... now check if we have a zero based
+ * offset that fits in the framebuffer + gart space, apply the
+ * magic offset we get from SETPARAM or calculated from fb_location
+ */
+ if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+ radeon_priv = filp_priv->driver_priv;
+ off += radeon_priv->radeon_fb_delta;
+ }
- *offset = off;
+ /* Finally, assume we aimed at a GART offset if beyond the fb */
+ if (off > (dev_priv->fb_location + dev_priv->fb_size))
+ off = off - (dev_priv->fb_location + dev_priv->fb_size) +
+ dev_priv->gart_vm_start;
- return 0;
+ /* Now recheck and fail if out of bounds */
+ if ((off >= dev_priv->fb_location &&
+ off < (dev_priv->fb_location + dev_priv->fb_size)) ||
+ (off >= dev_priv->gart_vm_start &&
+ off < (dev_priv->gart_vm_start + dev_priv->gart_size))) {
+ DRM_DEBUG("offset fixed up to 0x%x\n", off);
+ *offset = off;
+ return 0;
+ }
+ return DRM_ERR(EINVAL);
}
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
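
The rewritten radeon_check_and_fixup_offset() above reduces to a pure address calculation: accept offsets already inside the framebuffer or GART windows, rebase zero-based offsets by the per-client delta, redirect anything past the framebuffer into the GART aperture, and reject the rest. A stand-alone sketch of that decision, using plain integers instead of the driver's private structures (field names here are illustrative only):

#include <stdio.h>
#include <stdint.h>

struct card_map {              /* illustrative stand-in for drm_radeon_private_t */
    uint32_t fb_location, fb_size;
    uint32_t gart_vm_start, gart_size;
    uint32_t fb_delta;         /* per-client rebase, cf. radeon_fb_delta */
};

static int in_window(uint32_t off, uint32_t start, uint32_t size)
{
    return off >= start && off < start + size;
}

/* Returns 0 and writes the fixed-up offset, or -1 for an invalid offset. */
static int fixup_offset(const struct card_map *m, uint32_t *offset)
{
    uint32_t off = *offset;

    /* Best case: already inside the framebuffer or the GART window. */
    if (in_window(off, m->fb_location, m->fb_size) ||
        in_window(off, m->gart_vm_start, m->gart_size))
        return 0;

    /* Zero-based offset from an old client: rebase it. */
    if (off < m->fb_size + m->gart_size)
        off += m->fb_delta;

    /* Past the framebuffer: assume the client meant the GART aperture. */
    if (off > m->fb_location + m->fb_size)
        off = off - (m->fb_location + m->fb_size) + m->gart_vm_start;

    /* Recheck and fail if still out of bounds. */
    if (in_window(off, m->fb_location, m->fb_size) ||
        in_window(off, m->gart_vm_start, m->gart_size)) {
        *offset = off;
        return 0;
    }
    return -1;
}

int main(void)
{
    struct card_map m = { .fb_location = 0xd0000000, .fb_size = 0x08000000,
                          .gart_vm_start = 0xd8000000, .gart_size = 0x01000000,
                          .fb_delta = 0xd0000000 };
    uint32_t off = 0x1000;                 /* zero-based, old-style client */
    if (fixup_offset(&m, &off) == 0)
        printf("fixed up to 0x%x\n", off);
    return 0;
}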
@@ -1939,11 +1970,6 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS)
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_surface_alloc_t alloc;
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_COPY_FROM_USER_IOCTL(alloc,
(drm_radeon_surface_alloc_t __user *) data,
sizeof(alloc));
@@ -1960,12 +1986,7 @@ static int radeon_surface_free(DRM_IOCTL_ARGS)
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_surface_free_t memfree;
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
- DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data,
sizeof(memfree));
if (free_surface(filp, dev_priv, memfree.address))
@@ -2100,11 +2121,6 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2189,11 +2205,6 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data,
@@ -2340,11 +2351,6 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_COPY_FROM_USER_IOCTL(indirect,
(drm_radeon_indirect_t __user *) data,
sizeof(indirect));
@@ -2417,11 +2423,6 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
@@ -2738,11 +2739,6 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf,
@@ -2897,11 +2893,6 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
drm_radeon_getparam_t param;
int value;
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data,
sizeof(param));
@@ -2981,11 +2972,6 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
drm_radeon_setparam_t sp;
struct drm_radeon_driver_file_fields *radeon_priv;
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data,
@@ -3012,6 +2998,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
case RADEON_SETPARAM_PCIGART_LOCATION:
dev_priv->pcigart_offset = sp.value;
break;
+ case RADEON_SETPARAM_NEW_MEMMAP:
+ dev_priv->new_memmap = sp.value;
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);
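
For completeness, a user-space client that wants the new memory map announces it through the existing SETPARAM ioctl before submitting commands. A hedged sketch using libdrm's drmCommandWrite() wrapper; the header path and the drm_radeon_setparam_t layout (.param/.value fields) are assumptions about the client-side headers, not something shown in this patch:

/* Hedged sketch: how a DRI client might opt in to the new memory map.
 * Assumes libdrm's drmCommandWrite() and the radeon SETPARAM structure
 * with .param/.value fields; header locations vary by installation. */
#include <xf86drm.h>
#include <radeon_drm.h>

static int radeon_enable_new_memmap(int fd)
{
    drm_radeon_setparam_t sp = {
        .param = RADEON_SETPARAM_NEW_MEMMAP,   /* value 4, added above */
        .value = 1,
    };
    return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}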
diff --git a/trunk/drivers/char/drm/sis_mm.c b/trunk/drivers/char/drm/sis_mm.c
index 6774d2fe3452..5e9936bc307f 100644
--- a/trunk/drivers/char/drm/sis_mm.c
+++ b/trunk/drivers/char/drm/sis_mm.c
@@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
- DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
+ DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset);
return retval;
}
diff --git a/trunk/drivers/char/ipmi/ipmi_devintf.c b/trunk/drivers/char/ipmi/ipmi_devintf.c
index 932feedda262..e1c95374984c 100644
--- a/trunk/drivers/char/ipmi/ipmi_devintf.c
+++ b/trunk/drivers/char/ipmi/ipmi_devintf.c
@@ -42,7 +42,7 @@
#include
#include
#include
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include
#include
#include
@@ -55,7 +55,7 @@ struct ipmi_file_private
struct file *file;
struct fasync_struct *fasync_queue;
wait_queue_head_t wait;
- struct semaphore recv_sem;
+ struct mutex recv_mutex;
int default_retries;
unsigned int default_retry_time_ms;
};
@@ -141,7 +141,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&(priv->recv_msgs));
init_waitqueue_head(&priv->wait);
priv->fasync_queue = NULL;
- sema_init(&(priv->recv_sem), 1);
+ mutex_init(&priv->recv_mutex);
/* Use the low-level defaults. */
priv->default_retries = -1;
@@ -285,15 +285,15 @@ static int ipmi_ioctl(struct inode *inode,
break;
}
- /* We claim a semaphore because we don't want two
+ /* We claim a mutex because we don't want two
users getting something from the queue at a time.
Since we have to release the spinlock before we can
copy the data to the user, it's possible another
user will grab something from the queue, too. Then
the messages might get out of order if something
fails and the message gets put back onto the
- queue. This semaphore prevents that problem. */
- down(&(priv->recv_sem));
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
/* Grab the message off the list. */
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
@@ -352,7 +352,7 @@ static int ipmi_ioctl(struct inode *inode,
goto recv_putback_on_err;
}
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
ipmi_free_recv_msg(msg);
break;
@@ -362,11 +362,11 @@ static int ipmi_ioctl(struct inode *inode,
spin_lock_irqsave(&(priv->recv_msg_lock), flags);
list_add(entry, &(priv->recv_msgs));
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
recv_err:
- up(&(priv->recv_sem));
+ mutex_unlock(&priv->recv_mutex);
break;
}
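
The comment above spells out why an outer lock (now a mutex instead of a semaphore) wraps the whole receive path: the spinlock must be dropped before copying to user space, so without the outer lock a second reader could dequeue a message and the two could be re-queued out of order on failure. A small pthread-based model of the same "hold the outer lock across dequeue, copy and possible put-back" pattern (names are illustrative, not the kernel API):

/* Illustrative pthread model of the ipmi_devintf receive path: an outer
 * mutex keeps dequeue + copy + possible put-back atomic with respect to
 * other readers, while the inner lock only guards list manipulation. */
#include <pthread.h>
#include <stdio.h>

#define QLEN 8
static int queue[QLEN];
static int head, tail;
static pthread_mutex_t recv_mutex = PTHREAD_MUTEX_INITIALIZER;  /* outer */
static pthread_mutex_t list_lock  = PTHREAD_MUTEX_INITIALIZER;  /* inner */

static int copy_to_user_stub(int msg) { return msg < 0 ? -1 : 0; }

int receive_one(int *out)
{
    int msg, rc = -1;

    pthread_mutex_lock(&recv_mutex);          /* one reader at a time */

    pthread_mutex_lock(&list_lock);           /* grab the message off the list */
    if (head == tail) {
        pthread_mutex_unlock(&list_lock);
        pthread_mutex_unlock(&recv_mutex);
        return -1;
    }
    msg = queue[head]; head = (head + 1) % QLEN;
    pthread_mutex_unlock(&list_lock);

    if (copy_to_user_stub(msg) == 0) {
        *out = msg;
        rc = 0;
    } else {
        pthread_mutex_lock(&list_lock);       /* put it back; order preserved */
        head = (head + QLEN - 1) % QLEN;
        queue[head] = msg;
        pthread_mutex_unlock(&list_lock);
    }

    pthread_mutex_unlock(&recv_mutex);
    return rc;
}

int main(void)
{
    int v;
    queue[tail] = 42; tail = (tail + 1) % QLEN;
    if (receive_one(&v) == 0)
        printf("got %d\n", v);
    return 0;
}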
diff --git a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
index da1554194d3d..2062675f9e99 100644
--- a/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/trunk/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -227,7 +227,7 @@ static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
long time)
{
- if (! GET_STATUS_OBF(status)) {
+ if (!GET_STATUS_OBF(status)) {
kcs->obf_timeout -= time;
if (kcs->obf_timeout < 0) {
start_error_recovery(kcs, "OBF not ready in time");
@@ -407,7 +407,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (state == KCS_READ_STATE) {
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
read_next_byte(kcs);
} else {
@@ -447,7 +447,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
"Not in read state for error2");
break;
}
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
clear_obf(kcs, status);
@@ -462,7 +462,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
break;
}
- if (! check_obf(kcs, status, time))
+ if (!check_obf(kcs, status, time))
return SI_SM_CALL_WITH_DELAY;
clear_obf(kcs, status);
diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c
index 40eb005b9d77..0ded046d5aa8 100644
--- a/trunk/drivers/char/ipmi/ipmi_msghandler.c
+++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,6 +38,7 @@
#include
#include
#include
+#include <linux/mutex.h>
#include
#include
#include
@@ -234,7 +235,7 @@ struct ipmi_smi
/* The list of command receivers that are registered for commands
on this interface. */
- struct semaphore cmd_rcvrs_lock;
+ struct mutex cmd_rcvrs_mutex;
struct list_head cmd_rcvrs;
/* Events that were queues because no one was there to receive
@@ -387,10 +388,10 @@ static void clean_up_interface_data(ipmi_smi_t intf)
/* Wholesale remove all the entries from the list in the
* interface and wait for RCU to know that none are in use. */
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_add_rcu(&list, &intf->cmd_rcvrs);
list_del_rcu(&intf->cmd_rcvrs);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
list_for_each_entry_safe(rcvr, rcvr2, &list, link)
@@ -557,7 +558,7 @@ unsigned int ipmi_addr_length(int addr_type)
static void deliver_response(struct ipmi_recv_msg *msg)
{
- if (! msg->user) {
+ if (!msg->user) {
ipmi_smi_t intf = msg->user_msg_data;
unsigned long flags;
@@ -598,11 +599,11 @@ static int intf_next_seq(ipmi_smi_t intf,
(i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
i = (i+1)%IPMI_IPMB_NUM_SEQ)
{
- if (! intf->seq_table[i].inuse)
+ if (!intf->seq_table[i].inuse)
break;
}
- if (! intf->seq_table[i].inuse) {
+ if (!intf->seq_table[i].inuse) {
intf->seq_table[i].recv_msg = recv_msg;
/* Start with the maximum timeout, when the send response
@@ -763,7 +764,7 @@ int ipmi_create_user(unsigned int if_num,
}
new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
- if (! new_user)
+ if (!new_user)
return -ENOMEM;
spin_lock_irqsave(&interfaces_lock, flags);
@@ -819,14 +820,13 @@ static void free_user(struct kref *ref)
int ipmi_destroy_user(ipmi_user_t user)
{
- int rv = -ENODEV;
ipmi_smi_t intf = user->intf;
int i;
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- user->valid = 1;
+ user->valid = 0;
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
@@ -847,7 +847,7 @@ int ipmi_destroy_user(ipmi_user_t user)
* since other things may be using it till we do
* synchronize_rcu()) then free everything in that list.
*/
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
@@ -855,7 +855,7 @@ int ipmi_destroy_user(ipmi_user_t user)
rcvrs = rcvr;
}
}
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
@@ -871,7 +871,7 @@ int ipmi_destroy_user(ipmi_user_t user)
kref_put(&user->refcount, free_user);
- return rv;
+ return 0;
}
void ipmi_get_version(ipmi_user_t user,
@@ -936,7 +936,8 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
if (val) {
/* Deliver any queued events. */
- list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
+ list_for_each_entry_safe(msg, msg2, &intf->waiting_events,
+ link) {
list_del(&msg->link);
list_add_tail(&msg->link, &msgs);
}
@@ -978,13 +979,13 @@ int ipmi_register_for_cmd(ipmi_user_t user,
rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
- if (! rcvr)
+ if (!rcvr)
return -ENOMEM;
rcvr->cmd = cmd;
rcvr->netfn = netfn;
rcvr->user = user;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
entry = find_cmd_rcvr(intf, netfn, cmd);
if (entry) {
@@ -995,7 +996,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
if (rv)
kfree(rcvr);
@@ -1009,17 +1010,17 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
ipmi_smi_t intf = user->intf;
struct cmd_rcvr *rcvr;
- down(&intf->cmd_rcvrs_lock);
+ mutex_lock(&intf->cmd_rcvrs_mutex);
/* Make sure the command/netfn is not already registered. */
rcvr = find_cmd_rcvr(intf, netfn, cmd);
if ((rcvr) && (rcvr->user == user)) {
list_del_rcu(&rcvr->link);
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
kfree(rcvr);
return 0;
} else {
- up(&intf->cmd_rcvrs_lock);
+ mutex_unlock(&intf->cmd_rcvrs_mutex);
return -ENOENT;
}
}
@@ -1514,7 +1515,7 @@ int ipmi_request_settime(ipmi_user_t user,
unsigned char saddr, lun;
int rv;
- if (! user)
+ if (!user)
return -EINVAL;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (rv)
@@ -1545,7 +1546,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
unsigned char saddr, lun;
int rv;
- if (! user)
+ if (!user)
return -EINVAL;
rv = check_addr(user->intf, addr, &saddr, &lun);
if (rv)
@@ -1570,7 +1571,7 @@ static int ipmb_file_read_proc(char *page, char **start, off_t off,
char *out = (char *) page;
ipmi_smi_t intf = data;
int i;
- int rv= 0;
+ int rv = 0;
for (i = 0; i < IPMI_MAX_CHANNELS; i++)
rv += sprintf(out+rv, "%x ", intf->channels[i].address);
@@ -1989,7 +1990,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
} else {
bmc->dev = platform_device_alloc("ipmi_bmc",
bmc->id.device_id);
- if (! bmc->dev) {
+ if (!bmc->dev) {
printk(KERN_ERR
"ipmi_msghandler:"
" Unable to allocate platform device\n");
@@ -2305,8 +2306,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *si_dev,
- unsigned char slave_addr,
- ipmi_smi_t *new_intf)
+ unsigned char slave_addr)
{
int i, j;
int rv;
@@ -2366,7 +2366,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
spin_lock_init(&intf->events_lock);
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
- init_MUTEX(&intf->cmd_rcvrs_lock);
+ mutex_init(&intf->cmd_rcvrs_mutex);
INIT_LIST_HEAD(&intf->cmd_rcvrs);
init_waitqueue_head(&intf->waitq);
@@ -2388,9 +2388,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
if (rv)
goto out;
- /* FIXME - this is an ugly kludge, this sets the intf for the
- caller before sending any messages with it. */
- *new_intf = intf;
+ rv = handlers->start_processing(send_info, intf);
+ if (rv)
+ goto out;
get_guid(intf);
@@ -2622,7 +2622,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
spin_unlock_irqrestore(&intf->counter_lock, flags);
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -2777,7 +2777,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
spin_unlock_irqrestore(&intf->counter_lock, flags);
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -2869,13 +2869,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
events. */
rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
- if (! user->gets_events)
+ if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
rcu_read_unlock();
- list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
+ list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+ link) {
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
}
@@ -2905,7 +2906,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
/* No one to receive the message, put it in queue if there's
not already too many things in the queue. */
recv_msg = ipmi_alloc_recv_msg();
- if (! recv_msg) {
+ if (!recv_msg) {
/* We couldn't allocate memory for the
message, so requeue it for handling
later. */
@@ -3190,7 +3191,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
rcu_read_lock();
list_for_each_entry_rcu(user, &intf->users, link) {
- if (! user->handler->ipmi_watchdog_pretimeout)
+ if (!user->handler->ipmi_watchdog_pretimeout)
continue;
user->handler->ipmi_watchdog_pretimeout(user->handler_data);
@@ -3278,7 +3279,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
- if (! smi_msg)
+ if (!smi_msg)
return;
spin_unlock_irqrestore(&intf->seq_lock, *flags);
@@ -3314,8 +3315,9 @@ static void ipmi_timeout_handler(long timeout_period)
/* See if any waiting messages need to be processed. */
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
- if (! handle_new_recv_msg(intf, smi_msg)) {
+ list_for_each_entry_safe(smi_msg, smi_msg2,
+ &intf->waiting_msgs, link) {
+ if (!handle_new_recv_msg(intf, smi_msg)) {
list_del(&smi_msg->link);
ipmi_free_smi_msg(smi_msg);
} else {
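
A recurring pattern in the msghandler changes above is: take cmd_rcvrs_mutex, unlink the matching receivers from the RCU-protected list while chaining them onto a private list, drop the mutex, wait for readers, and only then free, so traversals under rcu_read_lock() never touch freed memory. A compressed user-space sketch of that shape, with synchronize_rcu() reduced to a stub and names chosen for illustration:

/* User-space model of the teardown pattern used above: unlink under a
 * mutex, wait for readers (synchronize_rcu() in the kernel, a stub here),
 * then free the collected entries. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rcvr { int user; struct rcvr *next; };

static struct rcvr *cmd_rcvrs;
static pthread_mutex_t cmd_rcvrs_mutex = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_readers(void) { /* stands in for synchronize_rcu() */ }

static void destroy_user(int user)
{
    struct rcvr **pp, *r, *doomed = NULL;

    pthread_mutex_lock(&cmd_rcvrs_mutex);
    for (pp = &cmd_rcvrs; (r = *pp) != NULL; ) {
        if (r->user == user) {
            *pp = r->next;            /* unlink (list_del_rcu in the kernel) */
            r->next = doomed;         /* chain onto the private list */
            doomed = r;
        } else {
            pp = &r->next;
        }
    }
    pthread_mutex_unlock(&cmd_rcvrs_mutex);

    wait_for_readers();               /* no reader can still hold a pointer */

    while (doomed) {
        r = doomed;
        doomed = r->next;
        free(r);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct rcvr *r = malloc(sizeof(*r));
        r->user = i % 2;
        r->next = cmd_rcvrs;
        cmd_rcvrs = r;
    }
    destroy_user(1);
    for (struct rcvr *r = cmd_rcvrs; r; r = r->next)
        printf("remaining user %d\n", r->user);
    return 0;
}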
diff --git a/trunk/drivers/char/ipmi/ipmi_poweroff.c b/trunk/drivers/char/ipmi/ipmi_poweroff.c
index 786a2802ca34..d0b5c08e7b4e 100644
--- a/trunk/drivers/char/ipmi/ipmi_poweroff.c
+++ b/trunk/drivers/char/ipmi/ipmi_poweroff.c
@@ -346,7 +346,7 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
{
const char ipmi_version_major = ipmi_version & 0xF;
const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
- const char mfr[3]=DELL_IANA_MFR_ID;
+ const char mfr[3] = DELL_IANA_MFR_ID;
if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
ipmi_version_major <= 1 &&
ipmi_version_minor < 5)
diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c
index 35fbd4d8ed4b..a86c0f29953e 100644
--- a/trunk/drivers/char/ipmi/ipmi_si_intf.c
+++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c
@@ -803,7 +803,7 @@ static int ipmi_thread(void *data)
set_user_nice(current, 19);
while (!kthread_should_stop()) {
spin_lock_irqsave(&(smi_info->si_lock), flags);
- smi_result=smi_event_handler(smi_info, 0);
+ smi_result = smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
/* do nothing */
@@ -972,10 +972,37 @@ static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
return si_irq_handler(irq, data, regs);
}
+static int smi_start_processing(void *send_info,
+ ipmi_smi_t intf)
+{
+ struct smi_info *new_smi = send_info;
+
+ new_smi->intf = intf;
+
+ /* Set up the timer that drives the interface. */
+ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ new_smi->last_timeout_jiffies = jiffies;
+ mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (new_smi->si_type != SI_BT) {
+ new_smi->thread = kthread_run(ipmi_thread, new_smi,
+ "kipmi%d", new_smi->intf_num);
+ if (IS_ERR(new_smi->thread)) {
+ printk(KERN_NOTICE "ipmi_si_intf: Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
+ new_smi->thread = NULL;
+ }
+ }
+
+ return 0;
+}
static struct ipmi_smi_handlers handlers =
{
.owner = THIS_MODULE,
+ .start_processing = smi_start_processing,
.sender = sender,
.request_events = request_events,
.set_run_to_completion = set_run_to_completion,
@@ -987,7 +1014,7 @@ static struct ipmi_smi_handlers handlers =
#define SI_MAX_PARMS 4
static LIST_HEAD(smi_infos);
-static DECLARE_MUTEX(smi_infos_lock);
+static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
@@ -2162,9 +2189,13 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
- kthread_stop(smi_info->thread);
- del_timer_sync(&smi_info->si_timer);
+ if (smi_info->intf) {
+ /* The timer and thread are only running if the
+ interface has been started up and registered. */
+ if (smi_info->thread != NULL)
+ kthread_stop(smi_info->thread);
+ del_timer_sync(&smi_info->si_timer);
+ }
}
static struct ipmi_default_vals
@@ -2245,7 +2276,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->slave_addr, new_smi->irq);
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_WARNING "ipmi_si: duplicate interface\n");
rv = -EBUSY;
@@ -2341,21 +2372,6 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->irq)
new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
- /* The ipmi_register_smi() code does some operations to
- determine the channel information, so we must be ready to
- handle operations before it is called. This means we have
- to stop the timer if we get an error after this point. */
- init_timer(&(new_smi->si_timer));
- new_smi->si_timer.data = (long) new_smi;
- new_smi->si_timer.function = smi_timeout;
- new_smi->last_timeout_jiffies = jiffies;
- new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-
- add_timer(&(new_smi->si_timer));
- if (new_smi->si_type != SI_BT)
- new_smi->thread = kthread_run(ipmi_thread, new_smi,
- "kipmi%d", new_smi->intf_num);
-
if (!new_smi->dev) {
/* If we don't already have a device from something
* else (like PCI), then register a new one. */
@@ -2365,7 +2381,7 @@ static int try_smi_init(struct smi_info *new_smi)
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");
- goto out_err_stop_timer;
+ goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
new_smi->dev->driver = &ipmi_driver;
@@ -2377,7 +2393,7 @@ static int try_smi_init(struct smi_info *new_smi)
" Unable to register system interface device:"
" %d\n",
rv);
- goto out_err_stop_timer;
+ goto out_err;
}
new_smi->dev_registered = 1;
}
@@ -2386,8 +2402,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi,
&new_smi->device_id,
new_smi->dev,
- new_smi->slave_addr,
- &(new_smi->intf));
+ new_smi->slave_addr);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to register device: error %d\n",
@@ -2417,7 +2432,7 @@ static int try_smi_init(struct smi_info *new_smi)
list_add_tail(&new_smi->link, &smi_infos);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
@@ -2454,7 +2469,7 @@ static int try_smi_init(struct smi_info *new_smi)
kfree(new_smi);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return rv;
}
@@ -2512,26 +2527,26 @@ static __devinit int init_ipmi_si(void)
#endif
if (si_trydefaults) {
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
default_find_bmc();
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
}
}
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
return 0;
}
}
@@ -2607,10 +2622,10 @@ static __exit void cleanup_ipmi_si(void)
pci_unregister_driver(&ipmi_pci_driver);
#endif
- down(&smi_infos_lock);
+ mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
- up(&smi_infos_lock);
+ mutex_unlock(&smi_infos_lock);
driver_unregister(&ipmi_driver);
}
diff --git a/trunk/drivers/char/ipmi/ipmi_watchdog.c b/trunk/drivers/char/ipmi/ipmi_watchdog.c
index 7ece9f3c8f70..2d11ddd99e55 100644
--- a/trunk/drivers/char/ipmi/ipmi_watchdog.c
+++ b/trunk/drivers/char/ipmi/ipmi_watchdog.c
@@ -39,6 +39,7 @@
#include
#include
#include
+#include <linux/mutex.h>
#include
#include
#include
@@ -303,21 +304,22 @@ static int ipmi_heartbeat(void);
static void panic_halt_ipmi_heartbeat(void);
-/* We use a semaphore to make sure that only one thing can send a set
+/* We use a mutex to make sure that only one thing can send a set
timeout at one time, because we only have one copy of the data.
- The semaphore is claimed when the set_timeout is sent and freed
+ The mutex is claimed when the set_timeout is sent and freed
when both messages are free. */
static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(set_timeout_lock);
+static DEFINE_MUTEX(set_timeout_lock);
+static DECLARE_COMPLETION(set_timeout_wait);
static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&set_timeout_tofree))
- up(&set_timeout_lock);
+ complete(&set_timeout_wait);
}
static struct ipmi_smi_msg set_timeout_smi_msg =
{
@@ -399,7 +401,7 @@ static int ipmi_set_timeout(int do_heartbeat)
/* We can only send one of these at a time. */
- down(&set_timeout_lock);
+ mutex_lock(&set_timeout_lock);
atomic_set(&set_timeout_tofree, 2);
@@ -407,16 +409,21 @@ static int ipmi_set_timeout(int do_heartbeat)
&set_timeout_recv_msg,
&send_heartbeat_now);
if (rv) {
- up(&set_timeout_lock);
- } else {
- if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
- || ((send_heartbeat_now)
- && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
- {
- rv = ipmi_heartbeat();
- }
+ mutex_unlock(&set_timeout_lock);
+ goto out;
}
+ wait_for_completion(&set_timeout_wait);
+
+ if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
+ || ((send_heartbeat_now)
+ && (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ {
+ rv = ipmi_heartbeat();
+ }
+ mutex_unlock(&set_timeout_lock);
+
+out:
return rv;
}
@@ -458,17 +465,17 @@ static void panic_halt_ipmi_set_timeout(void)
The semaphore is claimed when the set_timeout is sent and freed
when both messages are free. */
static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
-static DECLARE_MUTEX(heartbeat_lock);
-static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
+static DEFINE_MUTEX(heartbeat_lock);
+static DECLARE_COMPLETION(heartbeat_wait);
static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
{
if (atomic_dec_and_test(&heartbeat_tofree))
- up(&heartbeat_wait_lock);
+ complete(&heartbeat_wait);
}
static struct ipmi_smi_msg heartbeat_smi_msg =
{
@@ -511,14 +518,14 @@ static int ipmi_heartbeat(void)
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
}
- down(&heartbeat_lock);
+ mutex_lock(&heartbeat_lock);
atomic_set(&heartbeat_tofree, 2);
/* Don't reset the timer if we have the timer turned off, that
re-enables the watchdog. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return 0;
}
@@ -539,14 +546,14 @@ static int ipmi_heartbeat(void)
&heartbeat_recv_msg,
1);
if (rv) {
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
printk(KERN_WARNING PFX "heartbeat failure: %d\n",
rv);
return rv;
}
/* Wait for the heartbeat to be sent. */
- down(&heartbeat_wait_lock);
+ wait_for_completion(&heartbeat_wait);
if (heartbeat_recv_msg.msg.data[0] != 0) {
/* Got an error in the heartbeat response. It was already
@@ -555,7 +562,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
- up(&heartbeat_lock);
+ mutex_unlock(&heartbeat_lock);
return rv;
}
@@ -589,7 +596,7 @@ static void panic_halt_ipmi_heartbeat(void)
1);
}
-static struct watchdog_info ident=
+static struct watchdog_info ident =
{
.options = 0, /* WDIOF_SETTIMEOUT, */
.firmware_version = 1,
@@ -790,13 +797,13 @@ static int ipmi_fasync(int fd, struct file *file, int on)
static int ipmi_close(struct inode *ino, struct file *filep)
{
- if (iminor(ino)==WATCHDOG_MINOR)
- {
+ if (iminor(ino) == WATCHDOG_MINOR) {
if (expect_close == 42) {
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
ipmi_heartbeat();
}
clear_bit(0, &ipmi_wdog_open);
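
The watchdog conversion above replaces the old "locked semaphore as a wait flag" idiom with a completion: both message-free callbacks decrement a counter, and whichever reaches zero completes the waiter, while a separate mutex still serializes whole set_timeout/heartbeat operations. The same shape in user space, with a POSIX semaphore standing in for the kernel completion (illustrative only):

/* Model of the set_timeout/heartbeat wait: two asynchronous "free"
 * callbacks, the last of which wakes the waiter. A POSIX semaphore
 * plays the role of the kernel completion here. */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tofree;
static sem_t done;                          /* kernel: DECLARE_COMPLETION() */
static pthread_mutex_t op_lock = PTHREAD_MUTEX_INITIALIZER;

static void msg_freed(void)                 /* one call per outstanding message */
{
    if (atomic_fetch_sub(&tofree, 1) == 1)  /* kernel: atomic_dec_and_test() */
        sem_post(&done);                    /* kernel: complete() */
}

static void *smi_side(void *arg)
{
    (void)arg;
    msg_freed();                            /* smi message released */
    msg_freed();                            /* recv message released */
    return NULL;
}

int main(void)
{
    pthread_t t;

    sem_init(&done, 0, 0);
    pthread_mutex_lock(&op_lock);           /* one set_timeout at a time */
    atomic_store(&tofree, 2);

    pthread_create(&t, NULL, smi_side, NULL);
    sem_wait(&done);                        /* kernel: wait_for_completion() */
    pthread_mutex_unlock(&op_lock);

    pthread_join(t, NULL);
    printf("both messages freed, waiter released\n");
    return 0;
}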
diff --git a/trunk/drivers/char/istallion.c b/trunk/drivers/char/istallion.c
index e5247f85a446..ef20c1fc9c4c 100644
--- a/trunk/drivers/char/istallion.c
+++ b/trunk/drivers/char/istallion.c
@@ -706,7 +706,6 @@ static int stli_portcmdstats(stliport_t *portp);
static int stli_clrportstats(stliport_t *portp, comstats_t __user *cp);
static int stli_getportstruct(stliport_t __user *arg);
static int stli_getbrdstruct(stlibrd_t __user *arg);
-static void *stli_memalloc(int len);
static stlibrd_t *stli_allocbrd(void);
static void stli_ecpinit(stlibrd_t *brdp);
@@ -997,17 +996,6 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
/*****************************************************************************/
-/*
- * Local driver kernel malloc routine.
- */
-
-static void *stli_memalloc(int len)
-{
- return((void *) kmalloc(len, GFP_KERNEL));
-}
-
-/*****************************************************************************/
-
static int stli_open(struct tty_struct *tty, struct file *filp)
{
stlibrd_t *brdp;
@@ -3227,13 +3215,12 @@ static int stli_initports(stlibrd_t *brdp)
#endif
for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) {
- portp = (stliport_t *) stli_memalloc(sizeof(stliport_t));
- if (portp == (stliport_t *) NULL) {
+ portp = kzalloc(sizeof(stliport_t), GFP_KERNEL);
+ if (!portp) {
printk("STALLION: failed to allocate port structure\n");
continue;
}
- memset(portp, 0, sizeof(stliport_t));
portp->magic = STLI_PORTMAGIC;
portp->portnr = i;
portp->brdnr = brdp->brdnr;
@@ -4610,14 +4597,13 @@ static stlibrd_t *stli_allocbrd(void)
{
stlibrd_t *brdp;
- brdp = (stlibrd_t *) stli_memalloc(sizeof(stlibrd_t));
- if (brdp == (stlibrd_t *) NULL) {
+ brdp = kzalloc(sizeof(stlibrd_t), GFP_KERNEL);
+ if (!brdp) {
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlibrd_t));
- return((stlibrd_t *) NULL);
+ return NULL;
}
- memset(brdp, 0, sizeof(stlibrd_t));
brdp->magic = STLI_BOARDMAGIC;
return(brdp);
}
@@ -5210,12 +5196,12 @@ int __init stli_init(void)
/*
* Allocate a temporary write buffer.
*/
- stli_tmpwritebuf = (char *) stli_memalloc(STLI_TXBUFSIZE);
- if (stli_tmpwritebuf == (char *) NULL)
+ stli_tmpwritebuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
+ if (!stli_tmpwritebuf)
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", STLI_TXBUFSIZE);
- stli_txcookbuf = stli_memalloc(STLI_TXBUFSIZE);
- if (stli_txcookbuf == (char *) NULL)
+ stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
+ if (!stli_txcookbuf)
printk(KERN_ERR "STALLION: failed to allocate memory "
"(size=%d)\n", STLI_TXBUFSIZE);
diff --git a/trunk/drivers/char/stallion.c b/trunk/drivers/char/stallion.c
index 3f5d6077f39c..a9c5a7230f89 100644
--- a/trunk/drivers/char/stallion.c
+++ b/trunk/drivers/char/stallion.c
@@ -504,7 +504,6 @@ static int stl_echmcaintr(stlbrd_t *brdp);
static int stl_echpciintr(stlbrd_t *brdp);
static int stl_echpci64intr(stlbrd_t *brdp);
static void stl_offintr(void *private);
-static void *stl_memalloc(int len);
static stlbrd_t *stl_allocbrd(void);
static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
@@ -939,17 +938,6 @@ static int stl_parsebrd(stlconf_t *confp, char **argp)
/*****************************************************************************/
-/*
- * Local driver kernel memory allocation routine.
- */
-
-static void *stl_memalloc(int len)
-{
- return (void *) kmalloc(len, GFP_KERNEL);
-}
-
-/*****************************************************************************/
-
/*
* Allocate a new board structure. Fill out the basic info in it.
*/
@@ -958,14 +946,13 @@ static stlbrd_t *stl_allocbrd(void)
{
stlbrd_t *brdp;
- brdp = (stlbrd_t *) stl_memalloc(sizeof(stlbrd_t));
- if (brdp == (stlbrd_t *) NULL) {
+ brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
+ if (!brdp) {
printk("STALLION: failed to allocate memory (size=%d)\n",
sizeof(stlbrd_t));
- return (stlbrd_t *) NULL;
+ return NULL;
}
- memset(brdp, 0, sizeof(stlbrd_t));
brdp->magic = STL_BOARDMAGIC;
return brdp;
}
@@ -1017,9 +1004,9 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
portp->refcount++;
if ((portp->flags & ASYNC_INITIALIZED) == 0) {
- if (portp->tx.buf == (char *) NULL) {
- portp->tx.buf = (char *) stl_memalloc(STL_TXBUFSIZE);
- if (portp->tx.buf == (char *) NULL)
+ if (!portp->tx.buf) {
+ portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
+ if (!portp->tx.buf)
return -ENOMEM;
portp->tx.head = portp->tx.buf;
portp->tx.tail = portp->tx.buf;
@@ -2178,13 +2165,12 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
* each ports data structures.
*/
for (i = 0; (i < panelp->nrports); i++) {
- portp = (stlport_t *) stl_memalloc(sizeof(stlport_t));
- if (portp == (stlport_t *) NULL) {
+ portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
+ if (!portp) {
printk("STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlport_t));
break;
}
- memset(portp, 0, sizeof(stlport_t));
portp->magic = STL_PORTMAGIC;
portp->portnr = i;
@@ -2315,13 +2301,12 @@ static inline int stl_initeio(stlbrd_t *brdp)
* can complete the setup.
*/
- panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t));
- if (panelp == (stlpanel_t *) NULL) {
+ panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
+ if (!panelp) {
printk(KERN_WARNING "STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlpanel_t));
- return(-ENOMEM);
+ return -ENOMEM;
}
- memset(panelp, 0, sizeof(stlpanel_t));
panelp->magic = STL_PANELMAGIC;
panelp->brdnr = brdp->brdnr;
@@ -2490,13 +2475,12 @@ static inline int stl_initech(stlbrd_t *brdp)
status = inb(ioaddr + ECH_PNLSTATUS);
if ((status & ECH_PNLIDMASK) != nxtid)
break;
- panelp = (stlpanel_t *) stl_memalloc(sizeof(stlpanel_t));
- if (panelp == (stlpanel_t *) NULL) {
+ panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
+ if (!panelp) {
printk("STALLION: failed to allocate memory "
"(size=%d)\n", sizeof(stlpanel_t));
break;
}
- memset(panelp, 0, sizeof(stlpanel_t));
panelp->magic = STL_PANELMAGIC;
panelp->brdnr = brdp->brdnr;
panelp->panelnr = panelnr;
@@ -3074,8 +3058,8 @@ static int __init stl_init(void)
/*
* Allocate a temporary write buffer.
*/
- stl_tmpwritebuf = (char *) stl_memalloc(STL_TXBUFSIZE);
- if (stl_tmpwritebuf == (char *) NULL)
+ stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
+ if (!stl_tmpwritebuf)
printk("STALLION: failed to allocate memory (size=%d)\n",
STL_TXBUFSIZE);
diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c
index 0bfd1b63662e..98b126c2ded8 100644
--- a/trunk/drivers/char/tty_io.c
+++ b/trunk/drivers/char/tty_io.c
@@ -376,7 +376,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, s
return copied;
}
-EXPORT_SYMBOL_GPL(tty_insert_flip_string);
+EXPORT_SYMBOL(tty_insert_flip_string);
int tty_insert_flip_string_flags(struct tty_struct *tty, const unsigned char *chars, const char *flags, size_t size)
{
diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c
index ca4844c527da..acc5d47844eb 100644
--- a/trunk/drivers/char/vt.c
+++ b/trunk/drivers/char/vt.c
@@ -2328,6 +2328,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SETVESABLANK:
set_vesa_blanking(p);
break;
+ case TIOCL_GETKMSGREDIRECT:
+ data = kmsg_redirect;
+ ret = __put_user(data, p);
+ break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
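
The new TIOCL_GETKMSGREDIRECT case mirrors the existing SET variant: the first byte of the argument selects the subcode, and the current kmsg_redirect value is written back through the same pointer (the __put_user(data, p) above). A hedged user-space sketch, assuming the constant is exported via linux/tiocl.h and that /dev/tty0 is an acceptable console handle:

/* Hedged sketch: query the kmsg redirect target via the new TIOCLINUX
 * subcode added above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
    char arg = TIOCL_GETKMSGREDIRECT;   /* byte 0 selects the subcode */
    int fd = open("/dev/tty0", O_RDONLY);

    if (fd < 0)
        return 1;
    if (ioctl(fd, TIOCLINUX, &arg) == 0)
        printf("kmsg_redirect = %d\n", arg);  /* value written back into arg */
    close(fd);
    return 0;
}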
diff --git a/trunk/drivers/edac/Kconfig b/trunk/drivers/edac/Kconfig
index b582d0cdc24f..4f0898400c6d 100644
--- a/trunk/drivers/edac/Kconfig
+++ b/trunk/drivers/edac/Kconfig
@@ -71,7 +71,7 @@ config EDAC_E7XXX
config EDAC_E752X
tristate "Intel e752x (e7520, e7525, e7320)"
- depends on EDAC_MM_EDAC && PCI && X86
+ depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
help
Support for error detection and correction on the Intel
E7520, E7525, E7320 server chipsets.
diff --git a/trunk/drivers/ide/ide-disk.c b/trunk/drivers/ide/ide-disk.c
index ccf528d733bf..a5017de72da5 100644
--- a/trunk/drivers/ide/ide-disk.c
+++ b/trunk/drivers/ide/ide-disk.c
@@ -61,6 +61,7 @@
#include
#include
#include
+#include <linux/leds.h>
#define _IDE_DISK
@@ -317,6 +318,8 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
return ide_stopped;
}
+ ledtrig_ide_activity();
+
pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
(unsigned long long)block, rq->nr_sectors,
diff --git a/trunk/drivers/ide/ide-taskfile.c b/trunk/drivers/ide/ide-taskfile.c
index 0606bd2f6020..9233b8109a0f 100644
--- a/trunk/drivers/ide/ide-taskfile.c
+++ b/trunk/drivers/ide/ide-taskfile.c
@@ -375,7 +375,13 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
}
}
- ide_end_request(drive, 1, rq->hard_nr_sectors);
+ if (rq->rq_disk) {
+ ide_driver_t *drv;
+
+ drv = *(ide_driver_t **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, rq->hard_nr_sectors);
+ } else
+ ide_end_request(drive, 1, rq->hard_nr_sectors);
}
/*
diff --git a/trunk/drivers/infiniband/core/mad.c b/trunk/drivers/infiniband/core/mad.c
index f7854b65fd55..ba54c856b0e5 100644
--- a/trunk/drivers/infiniband/core/mad.c
+++ b/trunk/drivers/infiniband/core/mad.c
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (!is_vendor_oui(mad_reg_req->oui))
goto error1;
}
+ /* Make sure class supplied is consistent with RMPP */
+ if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+ if (!rmpp_version)
+ goto error1;
+ } else {
+ if (rmpp_version)
+ goto error1;
+ }
/* Make sure class supplied is consistent with QP type */
if (qp_type == IB_QPT_SMI) {
if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
}
EXPORT_SYMBOL(ib_create_send_mad);
+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+ return IB_MGMT_SA_HDR;
+ else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+ (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+ (mgmt_class == IB_MGMT_CLASS_BIS))
+ return IB_MGMT_DEVICE_HDR;
+ else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+ (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+ return IB_MGMT_VENDOR_HDR;
+ else
+ return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+ if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+ (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+ (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+ (mgmt_class == IB_MGMT_CLASS_BIS) ||
+ ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+ (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
goto error;
}
+ if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+ if (mad_agent_priv->agent.rmpp_version) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+
/*
* Save pointer to next work request to post in case the
* current one completes, and the user modifies the work
@@ -1618,14 +1662,59 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
+static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_recv_wc *rwc)
+{
+ return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
+ rwc->recv_buf.mad->mad_hdr.mgmt_class;
+}
+
+static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_recv_wc *rwc )
+{
+ struct ib_ah_attr attr;
+ u8 send_resp, rcv_resp;
+
+ send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
+ mad_hdr.method & IB_MGMT_METHOD_RESP;
+ rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
+
+ if (!send_resp && rcv_resp)
+ /* is request/response. GID/LIDs are both local (same). */
+ return 1;
+
+ if (send_resp == rcv_resp)
+ /* both requests, or both responses. GIDs different */
+ return 0;
+
+ if (ib_query_ah(wr->send_buf.ah, &attr))
+ /* Assume not equal, to avoid false positives. */
+ return 0;
+
+ if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
+ return attr.dlid == rwc->wc->slid;
+ else if ((attr.ah_flags & IB_AH_GRH) &&
+ (rwc->wc->wc_flags & IB_WC_GRH))
+ return memcmp(attr.grh.dgid.raw,
+ rwc->recv_buf.grh->sgid.raw, 16) == 0;
+ else
+ /* one has GID, other does not. Assume different */
+ return 0;
+}
struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
+ struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_mad *mad;
+
+ mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
agent_list) {
- if (mad_send_wr->tid == tid)
+ if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
+ rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
+ rcv_has_same_gid(mad_send_wr, mad_recv_wc))
return mad_send_wr;
}
@@ -1636,7 +1725,10 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
agent_list) {
if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
- mad_send_wr->tid == tid && mad_send_wr->timeout) {
+ mad_send_wr->tid == mad->mad_hdr.tid &&
+ mad_send_wr->timeout &&
+ rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
+ rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
/* Verify request has not been canceled */
return (mad_send_wr->status == IB_WC_SUCCESS) ?
mad_send_wr : NULL;
@@ -1661,7 +1753,6 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
- __be64 tid;
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -1677,9 +1768,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
/* Complete corresponding request */
if (response_mad(mad_recv_wc->recv_buf.mad)) {
- tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
spin_lock_irqsave(&mad_agent_priv->lock, flags);
- mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
+ mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
if (!mad_send_wr) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
ib_free_recv_mad(mad_recv_wc);
@@ -2408,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
}
}
sg_list.addr = dma_map_single(qp_info->port_priv->
- device->dma_device,
- &mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ device->dma_device,
+ &mad_priv->grh,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
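
The ib_find_send_mad() rework above tightens matching from "same TID" to "same TID, same management class, and same remote identity", via rcv_has_same_class() and rcv_has_same_gid(). A stripped-down model of that stricter predicate, with the InfiniBand structures reduced to plain fields (purely illustrative, not the verbs API):

/* Illustrative model of the stricter send/receive matching introduced
 * above: a response is paired with a pending request only when TID,
 * management class and remote identity all agree. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sent_mad { uint64_t tid; uint8_t mgmt_class; uint8_t gid[16]; };
struct recv_mad { uint64_t tid; uint8_t mgmt_class; uint8_t gid[16]; };

static int matches(const struct sent_mad *s, const struct recv_mad *r)
{
    return s->tid == r->tid &&
           s->mgmt_class == r->mgmt_class &&            /* rcv_has_same_class */
           memcmp(s->gid, r->gid, sizeof(s->gid)) == 0; /* rcv_has_same_gid   */
}

int main(void)
{
    struct sent_mad sent = { .tid = 0x1234, .mgmt_class = 0x03 };
    struct recv_mad resp = { .tid = 0x1234, .mgmt_class = 0x04 };

    memset(sent.gid, 0xab, sizeof(sent.gid));
    memcpy(resp.gid, sent.gid, sizeof(resp.gid));

    /* Same TID but a different class: the old TID-only lookup would have
     * paired these two; the new predicate rejects the match. */
    printf("match: %s\n", matches(&sent, &resp) ? "yes" : "no");
    return 0;
}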
diff --git a/trunk/drivers/infiniband/core/mad_priv.h b/trunk/drivers/infiniband/core/mad_priv.h
index a7125d4b5ccf..6c9c133d71ef 100644
--- a/trunk/drivers/infiniband/core/mad_priv.h
+++ b/trunk/drivers/infiniband/core/mad_priv.h
@@ -216,7 +216,8 @@ extern kmem_cache_t *ib_mad_cache;
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
+ struct ib_mad_recv_wc *mad_recv_wc);
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc);
diff --git a/trunk/drivers/infiniband/core/mad_rmpp.c b/trunk/drivers/infiniband/core/mad_rmpp.c
index bacfdd5bddad..dfd4e588ce03 100644
--- a/trunk/drivers/infiniband/core/mad_rmpp.c
+++ b/trunk/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
}
}
-static int data_offset(u8 mgmt_class)
-{
- if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
- return IB_MGMT_SA_HDR;
- else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
- (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return IB_MGMT_VENDOR_HDR;
- else
- return IB_MGMT_RMPP_HDR;
-}
-
static void format_ack(struct ib_mad_send_buf *msg,
struct ib_rmpp_mad *data,
struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
struct ib_mad_send_buf *msg;
int ret, hdr_len;
- hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+ hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1, hdr_len,
0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
if (IS_ERR(ah))
return (void *) ah;
- hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+ hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1,
hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
- hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+ hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (pad > IB_MGMT_RMPP_DATA || pad < 0)
@@ -562,15 +551,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
return ib_send_mad(mad_send_wr);
}
-static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
- u8 rmpp_status)
+static void abort_send(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc wc;
unsigned long flags;
spin_lock_irqsave(&agent->lock, flags);
- mad_send_wr = ib_find_send_mad(agent, tid);
+ mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
if (!mad_send_wr)
goto out; /* Unmatched send */
@@ -612,8 +601,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
if (rmpp_mad->rmpp_hdr.rmpp_status) {
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
return;
}
@@ -621,14 +609,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (newwin < seg_num) {
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_W2S);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
return;
}
spin_lock_irqsave(&agent->lock, flags);
- mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
+ mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
if (!mad_send_wr)
goto out; /* Unmatched ACK */
@@ -639,8 +626,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (seg_num > mad_send_wr->send_buf.seg_count ||
seg_num > mad_send_wr->newwin) {
spin_unlock_irqrestore(&agent->lock, flags);
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_S2B);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
return;
}
@@ -728,12 +714,10 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent,
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
} else
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- rmpp_mad->rmpp_hdr.rmpp_status);
+ abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}
static void process_rmpp_abort(struct ib_mad_agent_private *agent,
@@ -745,12 +729,10 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent,
if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
} else
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- rmpp_mad->rmpp_hdr.rmpp_status);
+ abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}
struct ib_mad_recv_wc *
@@ -764,8 +746,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
return mad_recv_wc;
if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_UNV);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
goto out;
}
@@ -783,8 +764,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
process_rmpp_abort(agent, mad_recv_wc);
break;
default:
- abort_send(agent, rmpp_mad->mad_hdr.tid,
- IB_MGMT_RMPP_STATUS_BADT);
+ abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
break;
}
diff --git a/trunk/drivers/infiniband/core/user_mad.c b/trunk/drivers/infiniband/core/user_mad.c
index fb6cd42601f9..afe70a549c2f 100644
--- a/trunk/drivers/infiniband/core/user_mad.c
+++ b/trunk/drivers/infiniband/core/user_mad.c
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
return ret;
}
-static int data_offset(u8 mgmt_class)
-{
- if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
- return IB_MGMT_SA_HDR;
- else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
- (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return IB_MGMT_VENDOR_HDR;
- else
- return IB_MGMT_RMPP_HDR;
-}
-
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *send_wc)
{
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
*/
return -ENOSPC;
}
- offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+ offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
max_seg_payload = sizeof (struct ib_mad) - offset;
for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
- if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
- hdr_len = IB_MGMT_SA_HDR;
- copy_offset = IB_MGMT_RMPP_HDR;
- rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
- IB_MGMT_RMPP_FLAG_ACTIVE;
- } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
- rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
- hdr_len = IB_MGMT_VENDOR_HDR;
+ hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+ if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+ copy_offset = IB_MGMT_MAD_HDR;
+ rmpp_active = 0;
+ } else {
copy_offset = IB_MGMT_RMPP_HDR;
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE;
- } else {
- hdr_len = IB_MGMT_MAD_HDR;
- copy_offset = IB_MGMT_MAD_HDR;
- rmpp_active = 0;
}
data_len = count - sizeof (struct ib_user_mad) - hdr_len;
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_av.c b/trunk/drivers/infiniband/hw/mthca/mthca_av.c
index f023d3936518..bc5bdcbe51b5 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_av.c
@@ -265,7 +265,7 @@ int __devinit mthca_init_av_table(struct mthca_dev *dev)
return -ENOMEM;
}
-void __devexit mthca_cleanup_av_table(struct mthca_dev *dev)
+void mthca_cleanup_av_table(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev))
return;
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c
index 76aabc5bf371..312cf90731ea 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -973,7 +973,7 @@ int __devinit mthca_init_cq_table(struct mthca_dev *dev)
return err;
}
-void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
+void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
mthca_alloc_cleanup(&dev->cq_table.alloc);
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c
index cbdc348fb689..99f109c3815d 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -765,7 +765,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
}
-static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
+static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
@@ -821,7 +821,7 @@ int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
return ret;
}
-void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
+void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
u8 status;
@@ -954,7 +954,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
return err;
}
-void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
+void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
u8 status;
int i;
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mad.c b/trunk/drivers/infiniband/hw/mthca/mthca_mad.c
index 4ace6a392f41..dfb482eac9a2 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -271,7 +271,7 @@ int mthca_create_agents(struct mthca_dev *dev)
return PTR_ERR(agent);
}
-void mthca_free_agents(struct mthca_dev *dev)
+void __devexit mthca_free_agents(struct mthca_dev *dev)
{
struct ib_mad_agent *agent;
int p, q;
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c b/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c
index 9965bda9afed..47ca8a9b7247 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -388,7 +388,7 @@ int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
return 0;
}
-void __devexit mthca_cleanup_mcg_table(struct mthca_dev *dev)
+void mthca_cleanup_mcg_table(struct mthca_dev *dev)
{
mthca_alloc_cleanup(&dev->mcg_table.alloc);
}
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c
index 698b62125765..25e1c1db9a40 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -170,7 +170,7 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
return -ENOMEM;
}
-static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
+static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
int i;
@@ -866,7 +866,7 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
return err;
}
-void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev)
+void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
/* XXX check if any MRs are still allocated? */
if (dev->limits.fmr_reserved_mtts)
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_pd.c b/trunk/drivers/infiniband/hw/mthca/mthca_pd.c
index 105fc5faaddf..59df51614c85 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -77,7 +77,7 @@ int __devinit mthca_init_pd_table(struct mthca_dev *dev)
dev->limits.reserved_pds);
}
-void __devexit mthca_cleanup_pd_table(struct mthca_dev *dev)
+void mthca_cleanup_pd_table(struct mthca_dev *dev)
{
/* XXX check if any PDs are still allocated? */
mthca_alloc_cleanup(&dev->pd_table.alloc);
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c
index 1bc2678c2fae..057c8e6af87b 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2204,7 +2204,7 @@ int __devinit mthca_init_qp_table(struct mthca_dev *dev)
return err;
}
-void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
+void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
int i;
u8 status;
diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c
index 0cfd15802217..2dd3aea05341 100644
--- a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -206,7 +206,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
roundup_pow_of_two(sizeof (struct mthca_next_seg) +
srq->max_gs * sizeof (struct mthca_data_seg)));
- if (ds > dev->limits.max_desc_sz)
+ if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
return -EINVAL;
srq->wqe_shift = long_log2(ds);
@@ -684,7 +684,7 @@ int __devinit mthca_init_srq_table(struct mthca_dev *dev)
return err;
}
-void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
return;
diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 53a32f65788d..9b0bd7c746ca 100644
--- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -723,7 +723,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
* destination address onto the front of the skb so we can
* figure out where to send the packet later.
*/
- if (!skb->dst || !skb->dst->neighbour) {
+ if ((!skb->dst || !skb->dst->neighbour) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c
index 61924cc30e55..fd8a95a9c5d3 100644
--- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c
@@ -607,10 +607,10 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
*/
if (likely(scmnd->use_sg)) {
nents = scmnd->use_sg;
- scat = (struct scatterlist *) scmnd->request_buffer;
+ scat = scmnd->request_buffer;
} else {
nents = 1;
- scat = (struct scatterlist *) scmnd->request_buffer;
+ scat = &req->fake_sg;
}
dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
diff --git a/trunk/drivers/input/keyboard/hil_kbd.c b/trunk/drivers/input/keyboard/hil_kbd.c
index 0a90962c38e7..63f387e4b783 100644
--- a/trunk/drivers/input/keyboard/hil_kbd.c
+++ b/trunk/drivers/input/keyboard/hil_kbd.c
@@ -66,7 +66,7 @@ static unsigned int hil_kbd_set3[HIL_KEYCODES_SET3_TBLSIZE] =
static char hil_language[][16] = { HIL_LOCALE_MAP };
struct hil_kbd {
- struct input_dev dev;
+ struct input_dev *dev;
struct serio *serio;
/* Input buffer and index for packets from HIL bus. */
@@ -86,7 +86,7 @@ struct hil_kbd {
/* Process a complete packet after transfer from the HIL */
static void hil_kbd_process_record(struct hil_kbd *kbd)
{
- struct input_dev *dev = &kbd->dev;
+ struct input_dev *dev = kbd->dev;
hil_packet *data = kbd->data;
hil_packet p;
int idx, i, cnt;
@@ -240,8 +240,8 @@ static void hil_kbd_disconnect(struct serio *serio)
return;
}
- input_unregister_device(&kbd->dev);
serio_close(serio);
+ input_unregister_device(kbd->dev);
kfree(kbd);
}
@@ -251,16 +251,18 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv)
uint8_t did, *idd;
int i;
- kbd = kmalloc(sizeof(*kbd), GFP_KERNEL);
+ kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
if (!kbd)
return -ENOMEM;
- memset(kbd, 0, sizeof(struct hil_kbd));
+
+ kbd->dev = input_allocate_device();
+ if (!kbd->dev) goto bail1;
+ kbd->dev->private = kbd;
if (serio_open(serio, drv)) goto bail0;
serio_set_drvdata(serio, kbd);
kbd->serio = serio;
- kbd->dev.private = kbd;
init_MUTEX_LOCKED(&(kbd->sem));
@@ -302,38 +304,38 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv)
did, hil_language[did & HIL_IDD_DID_TYPE_KB_LANG_MASK]);
break;
default:
- goto bail1;
+ goto bail2;
}
if(HIL_IDD_NUM_BUTTONS(idd) || HIL_IDD_NUM_AXES_PER_SET(*idd)) {
printk(KERN_INFO PREFIX "keyboards only, no combo devices supported.\n");
- goto bail1;
+ goto bail2;
}
- kbd->dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- kbd->dev.ledbit[0] = BIT(LED_NUML) | BIT(LED_CAPSL) | BIT(LED_SCROLLL);
- kbd->dev.keycodemax = HIL_KEYCODES_SET1_TBLSIZE;
- kbd->dev.keycodesize = sizeof(hil_kbd_set1[0]);
- kbd->dev.keycode = hil_kbd_set1;
- kbd->dev.name = strlen(kbd->rnm) ? kbd->rnm : HIL_GENERIC_NAME;
- kbd->dev.phys = "hpkbd/input0"; /* XXX */
+ kbd->dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
+ kbd->dev->ledbit[0] = BIT(LED_NUML) | BIT(LED_CAPSL) | BIT(LED_SCROLLL);
+ kbd->dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE;
+ kbd->dev->keycodesize = sizeof(hil_kbd_set1[0]);
+ kbd->dev->keycode = hil_kbd_set1;
+ kbd->dev->name = strlen(kbd->rnm) ? kbd->rnm : HIL_GENERIC_NAME;
+ kbd->dev->phys = "hpkbd/input0"; /* XXX */
- kbd->dev.id.bustype = BUS_HIL;
- kbd->dev.id.vendor = PCI_VENDOR_ID_HP;
- kbd->dev.id.product = 0x0001; /* TODO: get from kbd->rsc */
- kbd->dev.id.version = 0x0100; /* TODO: get from kbd->rsc */
- kbd->dev.dev = &serio->dev;
+ kbd->dev->id.bustype = BUS_HIL;
+ kbd->dev->id.vendor = PCI_VENDOR_ID_HP;
+ kbd->dev->id.product = 0x0001; /* TODO: get from kbd->rsc */
+ kbd->dev->id.version = 0x0100; /* TODO: get from kbd->rsc */
+ kbd->dev->dev = &serio->dev;
for (i = 0; i < 128; i++) {
- set_bit(hil_kbd_set1[i], kbd->dev.keybit);
- set_bit(hil_kbd_set3[i], kbd->dev.keybit);
+ set_bit(hil_kbd_set1[i], kbd->dev->keybit);
+ set_bit(hil_kbd_set3[i], kbd->dev->keybit);
}
- clear_bit(0, kbd->dev.keybit);
+ clear_bit(0, kbd->dev->keybit);
- input_register_device(&kbd->dev);
+ input_register_device(kbd->dev);
printk(KERN_INFO "input: %s, ID: %d\n",
- kbd->dev.name, did);
+ kbd->dev->name, did);
serio->write(serio, 0);
serio->write(serio, 0);
@@ -343,8 +345,10 @@ static int hil_kbd_connect(struct serio *serio, struct serio_driver *drv)
up(&(kbd->sem));
return 0;
- bail1:
+ bail2:
serio_close(serio);
+ bail1:
+ input_free_device(kbd->dev);
bail0:
kfree(kbd);
serio_set_drvdata(serio, NULL);
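Note: the hil_kbd and hil_ptr conversions in this series (and the hilkbd.c change that follows) all apply the same dynamic input_dev pattern: allocate with input_allocate_device() instead of embedding struct input_dev, register it, and free only a never-registered device on the error path. The following is a condensed, hypothetical sketch of that sequence; the "foo" names are illustrative and not part of the patch, and return-value checking of input_register_device() is omitted as in the patch itself.

#include <linux/input.h>
#include <linux/bitops.h>
#include <linux/slab.h>

struct foo {
	struct input_dev *dev;
};

static int foo_connect(void)
{
	struct foo *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	priv->dev = input_allocate_device();	/* replaces the embedded struct input_dev */
	if (!priv->dev)
		goto err_free_priv;

	priv->dev->private = priv;		/* 2006-era back pointer, as in the patch */
	priv->dev->name = "example device";
	set_bit(EV_KEY, priv->dev->evbit);

	input_register_device(priv->dev);
	return 0;

 err_free_priv:
	kfree(priv);
	return -ENOMEM;
}

static void foo_disconnect(struct foo *priv)
{
	/* once registered, unregistering drops the reference; input_free_device()
	 * is only for devices that were never registered (the bail path above) */
	input_unregister_device(priv->dev);
	kfree(priv);
}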
diff --git a/trunk/drivers/input/keyboard/hilkbd.c b/trunk/drivers/input/keyboard/hilkbd.c
index e95bc052e32a..33edd030aa75 100644
--- a/trunk/drivers/input/keyboard/hilkbd.c
+++ b/trunk/drivers/input/keyboard/hilkbd.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1998 Philip Blundell
* Copyright (C) 1999 Matthew Wilcox
- * Copyright (C) 1999-2003 Helge Deller
+ * Copyright (C) 1999-2006 Helge Deller
*
* Very basic HP Human Interface Loop (HIL) driver.
* This driver handles the keyboard on HP300 (m68k) and on some
@@ -90,7 +90,7 @@ static unsigned int hphilkeyb_keycode[HIL_KEYCODES_SET1_TBLSIZE] =
/* HIL structure */
static struct {
- struct input_dev dev;
+ struct input_dev *dev;
unsigned int curdev;
@@ -117,7 +117,7 @@ static void poll_finished(void)
down = (hil_dev.data[1] & 1) == 0;
scode = hil_dev.data[1] >> 1;
key = hphilkeyb_keycode[scode];
- input_report_key(&hil_dev.dev, key, down);
+ input_report_key(hil_dev.dev, key, down);
break;
}
hil_dev.curdev = 0;
@@ -207,9 +207,14 @@ hil_keyb_init(void)
unsigned int i, kbid;
wait_queue_head_t hil_wait;
- if (hil_dev.dev.id.bustype) {
+ if (hil_dev.dev) {
return -ENODEV; /* already initialized */
}
+
+ hil_dev.dev = input_allocate_device();
+ if (!hil_dev.dev)
+ return -ENOMEM;
+ hil_dev.dev->private = &hil_dev;
#if defined(CONFIG_HP300)
if (!hwreg_present((void *)(HILBASE + HIL_DATA)))
@@ -247,28 +252,26 @@ hil_keyb_init(void)
c = 0;
hil_do(HIL_WRITEKBDSADR, &c, 1);
- init_input_dev(&hil_dev.dev);
-
for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++)
if (hphilkeyb_keycode[i] != KEY_RESERVED)
- set_bit(hphilkeyb_keycode[i], hil_dev.dev.keybit);
-
- hil_dev.dev.evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- hil_dev.dev.ledbit[0] = BIT(LED_NUML) | BIT(LED_CAPSL) | BIT(LED_SCROLLL);
- hil_dev.dev.keycodemax = HIL_KEYCODES_SET1_TBLSIZE;
- hil_dev.dev.keycodesize = sizeof(hphilkeyb_keycode[0]);
- hil_dev.dev.keycode = hphilkeyb_keycode;
- hil_dev.dev.name = "HIL keyboard";
- hil_dev.dev.phys = "hpkbd/input0";
-
- hil_dev.dev.id.bustype = BUS_HIL;
- hil_dev.dev.id.vendor = PCI_VENDOR_ID_HP;
- hil_dev.dev.id.product = 0x0001;
- hil_dev.dev.id.version = 0x0010;
-
- input_register_device(&hil_dev.dev);
+ set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit);
+
+ hil_dev.dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
+ hil_dev.dev->ledbit[0] = BIT(LED_NUML) | BIT(LED_CAPSL) | BIT(LED_SCROLLL);
+ hil_dev.dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE;
+ hil_dev.dev->keycodesize = sizeof(hphilkeyb_keycode[0]);
+ hil_dev.dev->keycode = hphilkeyb_keycode;
+ hil_dev.dev->name = "HIL keyboard";
+ hil_dev.dev->phys = "hpkbd/input0";
+
+ hil_dev.dev->id.bustype = BUS_HIL;
+ hil_dev.dev->id.vendor = PCI_VENDOR_ID_HP;
+ hil_dev.dev->id.product = 0x0001;
+ hil_dev.dev->id.version = 0x0010;
+
+ input_register_device(hil_dev.dev);
printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n",
- hil_dev.dev.name, kbid, HILBASE, HIL_IRQ);
+ hil_dev.dev->name, kbid, HILBASE, HIL_IRQ);
return 0;
}
@@ -329,7 +332,9 @@ static void __exit hil_exit(void)
/* Turn off interrupts */
hil_do(HIL_INTOFF, NULL, 0);
- input_unregister_device(&hil_dev.dev);
+ input_unregister_device(hil_dev.dev);
+
+ hil_dev.dev = NULL;
#if defined(CONFIG_PARISC)
unregister_parisc_driver(&hil_driver);
diff --git a/trunk/drivers/input/mouse/hil_ptr.c b/trunk/drivers/input/mouse/hil_ptr.c
index c2bf2ed07dc6..bfb564fd8fe2 100644
--- a/trunk/drivers/input/mouse/hil_ptr.c
+++ b/trunk/drivers/input/mouse/hil_ptr.c
@@ -55,7 +55,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define HIL_PTR_MAX_LENGTH 16
struct hil_ptr {
- struct input_dev dev;
+ struct input_dev *dev;
struct serio *serio;
/* Input buffer and index for packets from HIL bus. */
@@ -79,7 +79,7 @@ struct hil_ptr {
/* Process a complete packet after transfer from the HIL */
static void hil_ptr_process_record(struct hil_ptr *ptr)
{
- struct input_dev *dev = &ptr->dev;
+ struct input_dev *dev = ptr->dev;
hil_packet *data = ptr->data;
hil_packet p;
int idx, i, cnt, laxis;
@@ -148,12 +148,12 @@ static void hil_ptr_process_record(struct hil_ptr *ptr)
if (absdev) {
val = lo + (hi<<8);
#ifdef TABLET_AUTOADJUST
- if (val < ptr->dev.absmin[ABS_X + i])
- ptr->dev.absmin[ABS_X + i] = val;
- if (val > ptr->dev.absmax[ABS_X + i])
- ptr->dev.absmax[ABS_X + i] = val;
+ if (val < dev->absmin[ABS_X + i])
+ dev->absmin[ABS_X + i] = val;
+ if (val > dev->absmax[ABS_X + i])
+ dev->absmax[ABS_X + i] = val;
#endif
- if (i%3) val = ptr->dev.absmax[ABS_X + i] - val;
+ if (i%3) val = dev->absmax[ABS_X + i] - val;
input_report_abs(dev, ABS_X + i, val);
} else {
val = (int) (((int8_t)lo) | ((int8_t)hi<<8));
@@ -233,26 +233,29 @@ static void hil_ptr_disconnect(struct serio *serio)
return;
}
- input_unregister_device(&ptr->dev);
serio_close(serio);
+ input_unregister_device(ptr->dev);
kfree(ptr);
}
static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
{
- struct hil_ptr *ptr;
- char *txt;
- unsigned int i, naxsets, btntype;
- uint8_t did, *idd;
+ struct hil_ptr *ptr;
+ char *txt;
+ unsigned int i, naxsets, btntype;
+ uint8_t did, *idd;
- if (!(ptr = kmalloc(sizeof(struct hil_ptr), GFP_KERNEL))) return -ENOMEM;
- memset(ptr, 0, sizeof(struct hil_ptr));
+ if (!(ptr = kzalloc(sizeof(struct hil_ptr), GFP_KERNEL)))
+ return -ENOMEM;
- if (serio_open(serio, driver)) goto bail0;
+ ptr->dev = input_allocate_device();
+ if (!ptr->dev) goto bail0;
+ ptr->dev->private = ptr;
+
+ if (serio_open(serio, driver)) goto bail1;
serio_set_drvdata(serio, ptr);
ptr->serio = serio;
- ptr->dev.private = ptr;
init_MUTEX_LOCKED(&(ptr->sem));
@@ -283,25 +286,24 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
up(&(ptr->sem));
- init_input_dev(&ptr->dev);
did = ptr->idd[0];
idd = ptr->idd + 1;
txt = "unknown";
if ((did & HIL_IDD_DID_TYPE_MASK) == HIL_IDD_DID_TYPE_REL) {
- ptr->dev.evbit[0] = BIT(EV_REL);
+ ptr->dev->evbit[0] = BIT(EV_REL);
txt = "relative";
}
if ((did & HIL_IDD_DID_TYPE_MASK) == HIL_IDD_DID_TYPE_ABS) {
- ptr->dev.evbit[0] = BIT(EV_ABS);
+ ptr->dev->evbit[0] = BIT(EV_ABS);
txt = "absolute";
}
- if (!ptr->dev.evbit[0]) {
- goto bail1;
+ if (!ptr->dev->evbit[0]) {
+ goto bail2;
}
ptr->nbtn = HIL_IDD_NUM_BUTTONS(idd);
- if (ptr->nbtn) ptr->dev.evbit[0] |= BIT(EV_KEY);
+ if (ptr->nbtn) ptr->dev->evbit[0] |= BIT(EV_KEY);
naxsets = HIL_IDD_NUM_AXSETS(*idd);
ptr->naxes = HIL_IDD_NUM_AXES_PER_SET(*idd);
@@ -325,7 +327,7 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
btntype = BTN_MOUSE;
for (i = 0; i < ptr->nbtn; i++) {
- set_bit(btntype | i, ptr->dev.keybit);
+ set_bit(btntype | i, ptr->dev->keybit);
ptr->btnmap[i] = btntype | i;
}
@@ -337,50 +339,52 @@ static int hil_ptr_connect(struct serio *serio, struct serio_driver *driver)
if ((did & HIL_IDD_DID_TYPE_MASK) == HIL_IDD_DID_TYPE_REL) {
for (i = 0; i < ptr->naxes; i++) {
- set_bit(REL_X + i, ptr->dev.relbit);
+ set_bit(REL_X + i, ptr->dev->relbit);
}
for (i = 3; (i < ptr->naxes + 3) && (naxsets > 1); i++) {
- set_bit(REL_X + i, ptr->dev.relbit);
+ set_bit(REL_X + i, ptr->dev->relbit);
}
} else {
for (i = 0; i < ptr->naxes; i++) {
- set_bit(ABS_X + i, ptr->dev.absbit);
- ptr->dev.absmin[ABS_X + i] = 0;
- ptr->dev.absmax[ABS_X + i] =
+ set_bit(ABS_X + i, ptr->dev->absbit);
+ ptr->dev->absmin[ABS_X + i] = 0;
+ ptr->dev->absmax[ABS_X + i] =
HIL_IDD_AXIS_MAX((ptr->idd + 1), i);
}
for (i = 3; (i < ptr->naxes + 3) && (naxsets > 1); i++) {
- set_bit(ABS_X + i, ptr->dev.absbit);
- ptr->dev.absmin[ABS_X + i] = 0;
- ptr->dev.absmax[ABS_X + i] =
+ set_bit(ABS_X + i, ptr->dev->absbit);
+ ptr->dev->absmin[ABS_X + i] = 0;
+ ptr->dev->absmax[ABS_X + i] =
HIL_IDD_AXIS_MAX((ptr->idd + 1), (i - 3));
}
#ifdef TABLET_AUTOADJUST
for (i = 0; i < ABS_MAX; i++) {
- int diff = ptr->dev.absmax[ABS_X + i] / 10;
- ptr->dev.absmin[ABS_X + i] += diff;
- ptr->dev.absmax[ABS_X + i] -= diff;
+ int diff = ptr->dev->absmax[ABS_X + i] / 10;
+ ptr->dev->absmin[ABS_X + i] += diff;
+ ptr->dev->absmax[ABS_X + i] -= diff;
}
#endif
}
- ptr->dev.name = strlen(ptr->rnm) ? ptr->rnm : HIL_GENERIC_NAME;
+ ptr->dev->name = strlen(ptr->rnm) ? ptr->rnm : HIL_GENERIC_NAME;
- ptr->dev.id.bustype = BUS_HIL;
- ptr->dev.id.vendor = PCI_VENDOR_ID_HP;
- ptr->dev.id.product = 0x0001; /* TODO: get from ptr->rsc */
- ptr->dev.id.version = 0x0100; /* TODO: get from ptr->rsc */
- ptr->dev.dev = &serio->dev;
+ ptr->dev->id.bustype = BUS_HIL;
+ ptr->dev->id.vendor = PCI_VENDOR_ID_HP;
+ ptr->dev->id.product = 0x0001; /* TODO: get from ptr->rsc */
+ ptr->dev->id.version = 0x0100; /* TODO: get from ptr->rsc */
+ ptr->dev->dev = &serio->dev;
- input_register_device(&ptr->dev);
+ input_register_device(ptr->dev);
printk(KERN_INFO "input: %s (%s), ID: %d\n",
- ptr->dev.name,
+ ptr->dev->name,
(btntype == BTN_MOUSE) ? "HIL mouse":"HIL tablet or touchpad",
did);
return 0;
- bail1:
+ bail2:
serio_close(serio);
+ bail1:
+ input_free_device(ptr->dev);
bail0:
kfree(ptr);
serio_set_drvdata(serio, NULL);
diff --git a/trunk/drivers/input/serio/gscps2.c b/trunk/drivers/input/serio/gscps2.c
index a7b0de0f92b2..c0b1e4becad3 100644
--- a/trunk/drivers/input/serio/gscps2.c
+++ b/trunk/drivers/input/serio/gscps2.c
@@ -1,7 +1,7 @@
/*
* drivers/input/serio/gscps2.c
*
- * Copyright (c) 2004 Helge Deller
+ * Copyright (c) 2004-2006 Helge Deller
* Copyright (c) 2002 Laurent Canet
* Copyright (c) 2002 Thibaut Varene
*
@@ -354,7 +354,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
memset(serio, 0, sizeof(struct serio));
ps2port->port = serio;
ps2port->padev = dev;
- ps2port->addr = ioremap(hpa, GSC_STATUS + 4);
+ ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
spin_lock_init(&ps2port->lock);
gscps2_reset(ps2port);
diff --git a/trunk/drivers/isdn/sc/ioctl.c b/trunk/drivers/isdn/sc/ioctl.c
index 94c9afb7017c..f4f71226a078 100644
--- a/trunk/drivers/isdn/sc/ioctl.c
+++ b/trunk/drivers/isdn/sc/ioctl.c
@@ -46,7 +46,8 @@ int sc_ioctl(int card, scs_ioctl *data)
pr_debug("%s: SCIOCRESET: ioctl received\n",
sc_adapter[card]->devicename);
sc_adapter[card]->StartOnReset = 0;
- return (reset(card));
+ kfree(rcvmsg);
+ return reset(card);
}
case SCIOCLOAD:
@@ -183,7 +184,7 @@ int sc_ioctl(int card, scs_ioctl *data)
sc_adapter[card]->devicename);
spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
- if(!spid) {
+ if (!spid) {
kfree(rcvmsg);
return -ENOMEM;
}
@@ -195,10 +196,10 @@ int sc_ioctl(int card, scs_ioctl *data)
if (!status) {
pr_debug("%s: SCIOCGETSPID: command successful\n",
sc_adapter[card]->devicename);
- }
- else {
+ } else {
pr_debug("%s: SCIOCGETSPID: command failed (status = %d)\n",
sc_adapter[card]->devicename, status);
+ kfree(spid);
kfree(rcvmsg);
return status;
}
diff --git a/trunk/drivers/leds/Kconfig b/trunk/drivers/leds/Kconfig
new file mode 100644
index 000000000000..2c4f20b7f021
--- /dev/null
+++ b/trunk/drivers/leds/Kconfig
@@ -0,0 +1,77 @@
+
+menu "LED devices"
+
+config NEW_LEDS
+ bool "LED Support"
+ help
+ Say Y to enable Linux LED support. This is not related to standard
+ keyboard LEDs which are controlled via the input system.
+
+config LEDS_CLASS
+ tristate "LED Class Support"
+ depends NEW_LEDS
+ help
+ This option enables the led sysfs class in /sys/class/leds. You'll
+ need this to do anything useful with LEDs. If unsure, say N.
+
+config LEDS_TRIGGERS
+ bool "LED Trigger support"
+ depends NEW_LEDS
+ help
+ This option enables trigger support for the leds class.
+ These triggers allow kernel events to drive the LEDs and can
+ be configured via sysfs. If unsure, say Y.
+
+config LEDS_CORGI
+ tristate "LED Support for the Sharp SL-C7x0 series"
+ depends LEDS_CLASS && PXA_SHARP_C7xx
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-C7x0 series (C700, C750, C760, C860).
+
+config LEDS_LOCOMO
+ tristate "LED Support for Locomo device"
+ depends LEDS_CLASS && SHARP_LOCOMO
+ help
+ This option enables support for the LEDs on Sharp Locomo.
+ Zaurus models SL-5500 and SL-5600.
+
+config LEDS_SPITZ
+ tristate "LED Support for the Sharp SL-Cxx00 series"
+ depends LEDS_CLASS && PXA_SHARP_Cxx00
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-Cxx00 series (C1000, C3000, C3100).
+
+config LEDS_IXP4XX
+ tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
+ depends LEDS_CLASS && ARCH_IXP4XX
+ help
+ This option enables support for the LEDs connected to GPIO
+ outputs of the Intel IXP4XX processors. To be useful the
+ particular board must have LEDs and they must be connected
+ to the GPIO lines. If unsure, say Y.
+
+config LEDS_TOSA
+ tristate "LED Support for the Sharp SL-6000 series"
+ depends LEDS_CLASS && PXA_SHARPSL
+ help
+ This option enables support for the LEDs on Sharp Zaurus
+ SL-6000 series.
+
+config LEDS_TRIGGER_TIMER
+ tristate "LED Timer Trigger"
+ depends LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by a programmable timer
+ via sysfs. If unsure, say Y.
+
+config LEDS_TRIGGER_IDE_DISK
+ bool "LED IDE Disk Trigger"
+ depends LEDS_TRIGGERS && BLK_DEV_IDEDISK
+ help
+ This allows LEDs to be controlled by IDE disk activity.
+ If unsure, say Y.
+
+endmenu
+
diff --git a/trunk/drivers/leds/Makefile b/trunk/drivers/leds/Makefile
new file mode 100644
index 000000000000..40699d3cabbf
--- /dev/null
+++ b/trunk/drivers/leds/Makefile
@@ -0,0 +1,16 @@
+
+# LED Core
+obj-$(CONFIG_NEW_LEDS) += led-core.o
+obj-$(CONFIG_LEDS_CLASS) += led-class.o
+obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
+
+# LED Platform Drivers
+obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o
+obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
+obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
+obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
+obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
+
+# LED Triggers
+obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
+obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
diff --git a/trunk/drivers/leds/led-class.c b/trunk/drivers/leds/led-class.c
new file mode 100644
index 000000000000..b0b5d05fadd6
--- /dev/null
+++ b/trunk/drivers/leds/led-class.c
@@ -0,0 +1,167 @@
+/*
+ * LED Class Core
+ *
+ * Copyright (C) 2005 John Lenz
+ * Copyright (C) 2005-2006 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "leds.h"
+
+static struct class *leds_class;
+
+static ssize_t led_brightness_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ ssize_t ret = 0;
+
+ /* no lock needed for this */
+ sprintf(buf, "%u\n", led_cdev->brightness);
+ ret = strlen(buf) + 1;
+
+ return ret;
+}
+
+static ssize_t led_brightness_store(struct class_device *dev,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ ssize_t ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ ret = after - buf;
+ led_set_brightness(led_cdev, state);
+ }
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(brightness, 0644, led_brightness_show,
+ led_brightness_store);
+#ifdef CONFIG_LEDS_TRIGGERS
+static CLASS_DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+#endif
+
+/**
+ * led_classdev_suspend - suspend an led_classdev.
+ * @led_cdev: the led_classdev to suspend.
+ */
+void led_classdev_suspend(struct led_classdev *led_cdev)
+{
+ led_cdev->flags |= LED_SUSPENDED;
+ led_cdev->brightness_set(led_cdev, 0);
+}
+EXPORT_SYMBOL_GPL(led_classdev_suspend);
+
+/**
+ * led_classdev_resume - resume an led_classdev.
+ * @led_cdev: the led_classdev to resume.
+ */
+void led_classdev_resume(struct led_classdev *led_cdev)
+{
+ led_cdev->brightness_set(led_cdev, led_cdev->brightness);
+ led_cdev->flags &= ~LED_SUSPENDED;
+}
+EXPORT_SYMBOL_GPL(led_classdev_resume);
+
+/**
+ * led_classdev_register - register a new object of the led_classdev class.
+ * @parent: the parent device of the LED class device to create.
+ * @led_cdev: the led_classdev structure for this device.
+ */
+int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
+{
+ led_cdev->class_dev = class_device_create(leds_class, NULL, 0,
+ parent, "%s", led_cdev->name);
+ if (unlikely(IS_ERR(led_cdev->class_dev)))
+ return PTR_ERR(led_cdev->class_dev);
+
+ class_set_devdata(led_cdev->class_dev, led_cdev);
+
+ /* register the attributes */
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_brightness);
+
+ /* add to the list of leds */
+ write_lock(&leds_list_lock);
+ list_add_tail(&led_cdev->node, &leds_list);
+ write_unlock(&leds_list_lock);
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ rwlock_init(&led_cdev->trigger_lock);
+
+ led_trigger_set_default(led_cdev);
+
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_trigger);
+#endif
+
+ printk(KERN_INFO "Registered led device: %s\n",
+ led_cdev->class_dev->class_id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_classdev_register);
+
+/**
+ * led_classdev_unregister - unregisters an object of the led_classdev class.
+ * @led_cdev: the led device to unregister
+ *
+ * Unregisters an object previously registered with led_classdev_register().
+ */
+void led_classdev_unregister(struct led_classdev *led_cdev)
+{
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_brightness);
+#ifdef CONFIG_LEDS_TRIGGERS
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_trigger);
+ write_lock(&led_cdev->trigger_lock);
+ if (led_cdev->trigger)
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+#endif
+
+ class_device_unregister(led_cdev->class_dev);
+
+ write_lock(&leds_list_lock);
+ list_del(&led_cdev->node);
+ write_unlock(&leds_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_classdev_unregister);
+
+static int __init leds_init(void)
+{
+ leds_class = class_create(THIS_MODULE, "leds");
+ if (IS_ERR(leds_class))
+ return PTR_ERR(leds_class);
+ return 0;
+}
+
+static void __exit leds_exit(void)
+{
+ class_destroy(leds_class);
+}
+
+subsys_initcall(leds_init);
+module_exit(leds_exit);
+
+MODULE_AUTHOR("John Lenz, Richard Purdie");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LED Class Interface");
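Note: led-class.c above is the whole driver-facing contract: fill in a struct led_classdev with a name and a brightness_set() hook, then call led_classdev_register()/led_classdev_unregister(). The board drivers added later in this series (leds-corgi.c, leds-spitz.c, leds-tosa.c, ...) follow exactly that shape; the hypothetical sketch below shows only the minimum. The "demo" names are illustrative, and it assumes the include/linux/leds.h header introduced elsewhere in this series.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/leds.h>

static void demo_led_set(struct led_classdev *led_cdev,
			 enum led_brightness value)
{
	/* drive the real hardware here; value ranges from LED_OFF to LED_FULL */
}

static struct led_classdev demo_led = {
	.name		 = "demo:green",	/* appears as /sys/class/leds/demo:green */
	.brightness_set	 = demo_led_set,
	.default_trigger = "timer",		/* optional; bound when the trigger registers */
};

static int demo_led_probe(struct platform_device *pdev)
{
	return led_classdev_register(&pdev->dev, &demo_led);
}

static int demo_led_remove(struct platform_device *pdev)
{
	led_classdev_unregister(&demo_led);
	return 0;
}

static struct platform_driver demo_led_driver = {
	.probe	= demo_led_probe,
	.remove	= demo_led_remove,
	.driver	= {
		.name = "demo-led",
	},
};

static int __init demo_led_init(void)
{
	return platform_driver_register(&demo_led_driver);
}

static void __exit demo_led_exit(void)
{
	platform_driver_unregister(&demo_led_driver);
}

module_init(demo_led_init);
module_exit(demo_led_exit);
MODULE_LICENSE("GPL");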
diff --git a/trunk/drivers/leds/led-core.c b/trunk/drivers/leds/led-core.c
new file mode 100644
index 000000000000..fe6541326c71
--- /dev/null
+++ b/trunk/drivers/leds/led-core.c
@@ -0,0 +1,25 @@
+/*
+ * LED Class Core
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "leds.h"
+
+rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
+LIST_HEAD(leds_list);
+
+EXPORT_SYMBOL_GPL(leds_list);
+EXPORT_SYMBOL_GPL(leds_list_lock);
diff --git a/trunk/drivers/leds/led-triggers.c b/trunk/drivers/leds/led-triggers.c
new file mode 100644
index 000000000000..5e2cd8be1191
--- /dev/null
+++ b/trunk/drivers/leds/led-triggers.c
@@ -0,0 +1,239 @@
+/*
+ * LED Triggers Core
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "leds.h"
+
+/*
+ * Nests outside led_cdev->trigger_lock
+ */
+static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
+static LIST_HEAD(trigger_list);
+
+ssize_t led_trigger_store(struct class_device *dev, const char *buf,
+ size_t count)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ char trigger_name[TRIG_NAME_MAX];
+ struct led_trigger *trig;
+ size_t len;
+
+ trigger_name[sizeof(trigger_name) - 1] = '\0';
+ strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
+ len = strlen(trigger_name);
+
+ if (len && trigger_name[len - 1] == '\n')
+ trigger_name[len - 1] = '\0';
+
+ if (!strcmp(trigger_name, "none")) {
+ write_lock(&led_cdev->trigger_lock);
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+ return count;
+ }
+
+ read_lock(&triggers_list_lock);
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (!strcmp(trigger_name, trig->name)) {
+ write_lock(&led_cdev->trigger_lock);
+ led_trigger_set(led_cdev, trig);
+ write_unlock(&led_cdev->trigger_lock);
+
+ read_unlock(&triggers_list_lock);
+ return count;
+ }
+ }
+ read_unlock(&triggers_list_lock);
+
+ return -EINVAL;
+}
+
+
+ssize_t led_trigger_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_trigger *trig;
+ int len = 0;
+
+ read_lock(&triggers_list_lock);
+ read_lock(&led_cdev->trigger_lock);
+
+ if (!led_cdev->trigger)
+ len += sprintf(buf+len, "[none] ");
+ else
+ len += sprintf(buf+len, "none ");
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
+ trig->name))
+ len += sprintf(buf+len, "[%s] ", trig->name);
+ else
+ len += sprintf(buf+len, "%s ", trig->name);
+ }
+ read_unlock(&led_cdev->trigger_lock);
+ read_unlock(&triggers_list_lock);
+
+ len += sprintf(len+buf, "\n");
+ return len;
+}
+
+void led_trigger_event(struct led_trigger *trigger,
+ enum led_brightness brightness)
+{
+ struct list_head *entry;
+
+ if (!trigger)
+ return;
+
+ read_lock(&trigger->leddev_list_lock);
+ list_for_each(entry, &trigger->led_cdevs) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = list_entry(entry, struct led_classdev, trig_list);
+ led_set_brightness(led_cdev, brightness);
+ }
+ read_unlock(&trigger->leddev_list_lock);
+}
+
+/* Caller must ensure led_cdev->trigger_lock held */
+void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
+{
+ unsigned long flags;
+
+ /* Remove any existing trigger */
+ if (led_cdev->trigger) {
+ write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ if (led_cdev->trigger->deactivate)
+ led_cdev->trigger->deactivate(led_cdev);
+ }
+ if (trigger) {
+ write_lock_irqsave(&trigger->leddev_list_lock, flags);
+ list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
+ write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
+ if (trigger->activate)
+ trigger->activate(led_cdev);
+ }
+ led_cdev->trigger = trigger;
+}
+
+void led_trigger_set_default(struct led_classdev *led_cdev)
+{
+ struct led_trigger *trig;
+
+ if (!led_cdev->default_trigger)
+ return;
+
+ read_lock(&triggers_list_lock);
+ write_lock(&led_cdev->trigger_lock);
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (!strcmp(led_cdev->default_trigger, trig->name))
+ led_trigger_set(led_cdev, trig);
+ }
+ write_unlock(&led_cdev->trigger_lock);
+ read_unlock(&triggers_list_lock);
+}
+
+int led_trigger_register(struct led_trigger *trigger)
+{
+ struct led_classdev *led_cdev;
+
+ rwlock_init(&trigger->leddev_list_lock);
+ INIT_LIST_HEAD(&trigger->led_cdevs);
+
+ /* Add to the list of led triggers */
+ write_lock(&triggers_list_lock);
+ list_add_tail(&trigger->next_trig, &trigger_list);
+ write_unlock(&triggers_list_lock);
+
+ /* Register with any LEDs that have this as a default trigger */
+ read_lock(&leds_list_lock);
+ list_for_each_entry(led_cdev, &leds_list, node) {
+ write_lock(&led_cdev->trigger_lock);
+ if (!led_cdev->trigger && led_cdev->default_trigger &&
+ !strcmp(led_cdev->default_trigger, trigger->name))
+ led_trigger_set(led_cdev, trigger);
+ write_unlock(&led_cdev->trigger_lock);
+ }
+ read_unlock(&leds_list_lock);
+
+ return 0;
+}
+
+void led_trigger_register_simple(const char *name, struct led_trigger **tp)
+{
+ struct led_trigger *trigger;
+
+ trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
+
+ if (trigger) {
+ trigger->name = name;
+ led_trigger_register(trigger);
+ }
+ *tp = trigger;
+}
+
+void led_trigger_unregister(struct led_trigger *trigger)
+{
+ struct led_classdev *led_cdev;
+
+ /* Remove from the list of led triggers */
+ write_lock(&triggers_list_lock);
+ list_del(&trigger->next_trig);
+ write_unlock(&triggers_list_lock);
+
+ /* Remove anyone actively using this trigger */
+ read_lock(&leds_list_lock);
+ list_for_each_entry(led_cdev, &leds_list, node) {
+ write_lock(&led_cdev->trigger_lock);
+ if (led_cdev->trigger == trigger)
+ led_trigger_set(led_cdev, NULL);
+ write_unlock(&led_cdev->trigger_lock);
+ }
+ read_unlock(&leds_list_lock);
+}
+
+void led_trigger_unregister_simple(struct led_trigger *trigger)
+{
+ led_trigger_unregister(trigger);
+ kfree(trigger);
+}
+
+/* Used by LED Class */
+EXPORT_SYMBOL_GPL(led_trigger_set);
+EXPORT_SYMBOL_GPL(led_trigger_set_default);
+EXPORT_SYMBOL_GPL(led_trigger_show);
+EXPORT_SYMBOL_GPL(led_trigger_store);
+
+/* LED Trigger Interface */
+EXPORT_SYMBOL_GPL(led_trigger_register);
+EXPORT_SYMBOL_GPL(led_trigger_unregister);
+
+/* Simple LED Trigger Interface */
+EXPORT_SYMBOL_GPL(led_trigger_register_simple);
+EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
+EXPORT_SYMBOL_GPL(led_trigger_event);
+
+MODULE_AUTHOR("Richard Purdie");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LED Triggers Core");
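Note: besides the full trigger interface, led-triggers.c above exports the "simple" interface used by ledtrig-ide-disk.c further down: declare a trigger pointer, register it by name, and call led_trigger_event() whenever activity occurs; any LED whose default_trigger matches the name attaches automatically. A condensed, hypothetical sketch of an event source (the "demo" names are illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/leds.h>

DEFINE_LED_TRIGGER(demo_activity_trigger);

/* call this from the subsystem whenever something happens */
void demo_activity(int busy)
{
	led_trigger_event(demo_activity_trigger, busy ? LED_FULL : LED_OFF);
}

static int __init demo_trig_init(void)
{
	led_trigger_register_simple("demo-activity", &demo_activity_trigger);
	return 0;
}

static void __exit demo_trig_exit(void)
{
	led_trigger_unregister_simple(demo_activity_trigger);
}

module_init(demo_trig_init);
module_exit(demo_trig_exit);
MODULE_LICENSE("GPL");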
diff --git a/trunk/drivers/leds/leds-corgi.c b/trunk/drivers/leds/leds-corgi.c
new file mode 100644
index 000000000000..bb7d84df0121
--- /dev/null
+++ b/trunk/drivers/leds/leds-corgi.c
@@ -0,0 +1,121 @@
+/*
+ * Corgi LED driver
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static void corgiled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ GPSR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
+ else
+ GPCR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
+}
+
+static void corgiled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
+ else
+ reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_LED_GREEN);
+}
+
+static struct led_classdev corgi_amber_led = {
+ .name = "corgi:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = corgiled_amber_set,
+};
+
+static struct led_classdev corgi_green_led = {
+ .name = "corgi:green",
+ .default_trigger = "nand-disk",
+ .brightness_set = corgiled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int corgiled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (corgi_amber_led.trigger && strcmp(corgi_amber_led.trigger->name, "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&corgi_amber_led);
+ led_classdev_suspend(&corgi_green_led);
+ return 0;
+}
+
+static int corgiled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&corgi_amber_led);
+ led_classdev_resume(&corgi_green_led);
+ return 0;
+}
+#endif
+
+static int corgiled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &corgi_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &corgi_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&corgi_amber_led);
+
+ return ret;
+}
+
+static int corgiled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&corgi_amber_led);
+ led_classdev_unregister(&corgi_green_led);
+ return 0;
+}
+
+static struct platform_driver corgiled_driver = {
+ .probe = corgiled_probe,
+ .remove = corgiled_remove,
+#ifdef CONFIG_PM
+ .suspend = corgiled_suspend,
+ .resume = corgiled_resume,
+#endif
+ .driver = {
+ .name = "corgi-led",
+ },
+};
+
+static int __init corgiled_init(void)
+{
+ return platform_driver_register(&corgiled_driver);
+}
+
+static void __exit corgiled_exit(void)
+{
+ platform_driver_unregister(&corgiled_driver);
+}
+
+module_init(corgiled_init);
+module_exit(corgiled_exit);
+
+MODULE_AUTHOR("Richard Purdie ");
+MODULE_DESCRIPTION("Corgi LED driver");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/leds/leds-ixp4xx-gpio.c b/trunk/drivers/leds/leds-ixp4xx-gpio.c
new file mode 100644
index 000000000000..30ced150e4cf
--- /dev/null
+++ b/trunk/drivers/leds/leds-ixp4xx-gpio.c
@@ -0,0 +1,215 @@
+/*
+ * IXP4XX GPIO driver LED driver
+ *
+ * Author: John Bowler
+ *
+ * Copyright (c) 2006 John Bowler
+ *
+ * Permission is hereby granted, free of charge, to any
+ * person obtaining a copy of this software and associated
+ * documentation files (the "Software"), to deal in the
+ * Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the
+ * following conditions:
+ *
+ * The above copyright notice and this permission notice
+ * shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+ * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+ * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+extern spinlock_t gpio_lock;
+
+/* Up to 16 gpio lines are possible. */
+#define GPIO_MAX 16
+static struct ixp4xxgpioled_device {
+ struct led_classdev ancestor;
+ int flags;
+} ixp4xxgpioled_devices[GPIO_MAX];
+
+void ixp4xxgpioled_brightness_set(struct led_classdev *pled,
+ enum led_brightness value)
+{
+ const struct ixp4xxgpioled_device *const ixp4xx_dev =
+ container_of(pled, struct ixp4xxgpioled_device, ancestor);
+ const u32 gpio_pin = ixp4xx_dev - ixp4xxgpioled_devices;
+
+ if (gpio_pin < GPIO_MAX && ixp4xx_dev->ancestor.name != 0) {
+ /* Set or clear the 'gpio_pin' bit according to the style
+ * and the required setting (value > 0 == on)
+ */
+ const int gpio_value =
+ (value > 0) == (ixp4xx_dev->flags != IXP4XX_GPIO_LOW) ?
+ IXP4XX_GPIO_HIGH : IXP4XX_GPIO_LOW;
+
+ {
+ unsigned long flags;
+ spin_lock_irqsave(&gpio_lock, flags);
+ gpio_line_set(gpio_pin, gpio_value);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ }
+ }
+}
+
+/* LEDs are described in resources, the following iterates over the valid
+ * LED resources.
+ */
+#define for_all_leds(i, pdev) \
+ for (i=0; i < pdev->num_resources; ++i) \
+ if (pdev->resource[i].start < GPIO_MAX && \
+ pdev->resource[i].name != 0)
+
+/* The following applies 'operation' to each LED from the given platform,
+ * the function always returns 0 to allow tail call elimination.
+ */
+static int apply_to_all_leds(struct platform_device *pdev,
+ void (*operation)(struct led_classdev *pled))
+{
+ int i;
+
+ for_all_leds(i, pdev)
+ operation(&ixp4xxgpioled_devices[pdev->resource[i].start].ancestor);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixp4xxgpioled_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return apply_to_all_leds(pdev, led_classdev_suspend);
+}
+
+static int ixp4xxgpioled_resume(struct platform_device *pdev)
+{
+ return apply_to_all_leds(pdev, led_classdev_resume);
+}
+#endif
+
+static void ixp4xxgpioled_remove_one_led(struct led_classdev *pled)
+{
+ led_classdev_unregister(pled);
+ pled->name = 0;
+}
+
+static int ixp4xxgpioled_remove(struct platform_device *pdev)
+{
+ return apply_to_all_leds(pdev, ixp4xxgpioled_remove_one_led);
+}
+
+static int ixp4xxgpioled_probe(struct platform_device *pdev)
+{
+ /* The board level has to tell the driver where the
+ * LEDs are connected - there is no way to find out
+ * electrically. It must also say whether the GPIO
+ * lines are active high or active low.
+ *
+ * To do this read the num_resources (the number of
+ * LEDs) and the struct resource (the data for each
+ * LED). The name comes from the resource, and it
+ * isn't copied.
+ */
+ int i;
+
+ for_all_leds(i, pdev) {
+ const u8 gpio_pin = pdev->resource[i].start;
+ int rc;
+
+ if (ixp4xxgpioled_devices[gpio_pin].ancestor.name == 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+ gpio_line_config(gpio_pin, IXP4XX_GPIO_OUT);
+ /* The config can, apparently, reset the state,
+ * I suspect the gpio line may be an input and
+ * the config may cause the line to be latched,
+ * so the setting depends on how the LED is
+ * connected to the line (which affects how it
+ * floats if not driven).
+ */
+ gpio_line_set(gpio_pin, IXP4XX_GPIO_HIGH);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ ixp4xxgpioled_devices[gpio_pin].flags =
+ pdev->resource[i].flags & IORESOURCE_BITS;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.name =
+ pdev->resource[i].name;
+
+ /* This is how a board manufacturer makes the LED
+ * come on on reset - the GPIO line will be high, so
+ * make the LED light when the line is low...
+ */
+ if (ixp4xxgpioled_devices[gpio_pin].flags != IXP4XX_GPIO_LOW)
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 100;
+ else
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 0;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.flags = 0;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.brightness_set =
+ ixp4xxgpioled_brightness_set;
+
+ ixp4xxgpioled_devices[gpio_pin].ancestor.default_trigger = 0;
+ }
+
+ rc = led_classdev_register(&pdev->dev,
+ &ixp4xxgpioled_devices[gpio_pin].ancestor);
+ if (rc < 0) {
+ ixp4xxgpioled_devices[gpio_pin].ancestor.name = 0;
+ ixp4xxgpioled_remove(pdev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver ixp4xxgpioled_driver = {
+ .probe = ixp4xxgpioled_probe,
+ .remove = ixp4xxgpioled_remove,
+#ifdef CONFIG_PM
+ .suspend = ixp4xxgpioled_suspend,
+ .resume = ixp4xxgpioled_resume,
+#endif
+ .driver = {
+ .name = "IXP4XX-GPIO-LED",
+ },
+};
+
+static int __init ixp4xxgpioled_init(void)
+{
+ return platform_driver_register(&ixp4xxgpioled_driver);
+}
+
+static void __exit ixp4xxgpioled_exit(void)
+{
+ platform_driver_unregister(&ixp4xxgpioled_driver);
+}
+
+module_init(ixp4xxgpioled_init);
+module_exit(ixp4xxgpioled_exit);
+
+MODULE_AUTHOR("John Bowler ");
+MODULE_DESCRIPTION("IXP4XX GPIO LED driver");
+MODULE_LICENSE("Dual MIT/GPL");
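Note: the probe comment above relies on the board code describing its LEDs as platform resources: resource->start selects the GPIO line, resource->name becomes the LED name, and the IORESOURCE_BITS part of resource->flags selects active high or low. A hypothetical board-setup fragment matching that convention follows; names and GPIO numbers are examples only, and it is assumed to live in an IXP4xx board file that already includes the platform device and GPIO headers.

static struct resource example_led_resources[] = {
	{
		.name	= "example:status",	/* becomes the LED class name */
		.start	= 3,			/* GPIO line driving the LED */
		.end	= 3,
		.flags	= IXP4XX_GPIO_LOW,	/* LED is lit when the line is low */
	},
};

static struct platform_device example_leds_device = {
	.name		= "IXP4XX-GPIO-LED",	/* must match the driver name above */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_led_resources),
	.resource	= example_led_resources,
};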
diff --git a/trunk/drivers/leds/leds-locomo.c b/trunk/drivers/leds/leds-locomo.c
new file mode 100644
index 000000000000..749a86c2adb6
--- /dev/null
+++ b/trunk/drivers/leds/leds-locomo.c
@@ -0,0 +1,95 @@
+/*
+ * linux/drivers/leds/leds-locomo.c
+ *
+ * Copyright (C) 2005 John Lenz
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+static void locomoled_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value, int offset)
+{
+ struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->class_dev->dev);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value)
+ locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset);
+ else
+ locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset);
+ local_irq_restore(flags);
+}
+
+static void locomoled_brightness_set0(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0);
+}
+
+static void locomoled_brightness_set1(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1);
+}
+
+static struct led_classdev locomo_led0 = {
+ .name = "locomo:amber",
+ .brightness_set = locomoled_brightness_set0,
+};
+
+static struct led_classdev locomo_led1 = {
+ .name = "locomo:green",
+ .brightness_set = locomoled_brightness_set1,
+};
+
+static int locomoled_probe(struct locomo_dev *ldev)
+{
+ int ret;
+
+ ret = led_classdev_register(&ldev->dev, &locomo_led0);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&ldev->dev, &locomo_led1);
+ if (ret < 0)
+ led_classdev_unregister(&locomo_led0);
+
+ return ret;
+}
+
+static int locomoled_remove(struct locomo_dev *dev)
+{
+ led_classdev_unregister(&locomo_led0);
+ led_classdev_unregister(&locomo_led1);
+ return 0;
+}
+
+static struct locomo_driver locomoled_driver = {
+ .drv = {
+ .name = "locomoled"
+ },
+ .devid = LOCOMO_DEVID_LED,
+ .probe = locomoled_probe,
+ .remove = locomoled_remove,
+};
+
+static int __init locomoled_init(void)
+{
+ return locomo_driver_register(&locomoled_driver);
+}
+module_init(locomoled_init);
+
+MODULE_AUTHOR("John Lenz ");
+MODULE_DESCRIPTION("Locomo LED driver");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/leds/leds-spitz.c b/trunk/drivers/leds/leds-spitz.c
new file mode 100644
index 000000000000..65bbef4a5e09
--- /dev/null
+++ b/trunk/drivers/leds/leds-spitz.c
@@ -0,0 +1,125 @@
+/*
+ * Spitz LED driver
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static void spitzled_amber_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
+ else
+ reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
+}
+
+static void spitzled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
+ else
+ reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_GREEN);
+}
+
+static struct led_classdev spitz_amber_led = {
+ .name = "spitz:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = spitzled_amber_set,
+};
+
+static struct led_classdev spitz_green_led = {
+ .name = "spitz:green",
+ .default_trigger = "ide-disk",
+ .brightness_set = spitzled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int spitzled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (spitz_amber_led.trigger && strcmp(spitz_amber_led.trigger->name, "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&spitz_amber_led);
+ led_classdev_suspend(&spitz_green_led);
+ return 0;
+}
+
+static int spitzled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&spitz_amber_led);
+ led_classdev_resume(&spitz_green_led);
+ return 0;
+}
+#endif
+
+static int spitzled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (machine_is_akita())
+ spitz_green_led.default_trigger = "nand-disk";
+
+ ret = led_classdev_register(&pdev->dev, &spitz_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &spitz_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&spitz_amber_led);
+
+ return ret;
+}
+
+static int spitzled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&spitz_amber_led);
+ led_classdev_unregister(&spitz_green_led);
+
+ return 0;
+}
+
+static struct platform_driver spitzled_driver = {
+ .probe = spitzled_probe,
+ .remove = spitzled_remove,
+#ifdef CONFIG_PM
+ .suspend = spitzled_suspend,
+ .resume = spitzled_resume,
+#endif
+ .driver = {
+ .name = "spitz-led",
+ },
+};
+
+static int __init spitzled_init(void)
+{
+ return platform_driver_register(&spitzled_driver);
+}
+
+static void __exit spitzled_exit(void)
+{
+ platform_driver_unregister(&spitzled_driver);
+}
+
+module_init(spitzled_init);
+module_exit(spitzled_exit);
+
+MODULE_AUTHOR("Richard Purdie ");
+MODULE_DESCRIPTION("Spitz LED driver");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/leds/leds-tosa.c b/trunk/drivers/leds/leds-tosa.c
new file mode 100644
index 000000000000..c9e8cc1ec481
--- /dev/null
+++ b/trunk/drivers/leds/leds-tosa.c
@@ -0,0 +1,131 @@
+/*
+ * Tosa LED driver
+ *
+ * Copyright 2005 Dirk Opfer
+ *
+ * Author: Dirk Opfer
+ * based on spitz.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static void tosaled_amber_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_CHRG_ERR_LED);
+ else
+ reset_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_CHRG_ERR_LED);
+}
+
+static void tosaled_green_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ set_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_NOTE_LED);
+ else
+ reset_scoop_gpio(&tosascoop_jc_device.dev,
+ TOSA_SCOOP_JC_NOTE_LED);
+}
+
+static struct led_classdev tosa_amber_led = {
+ .name = "tosa:amber",
+ .default_trigger = "sharpsl-charge",
+ .brightness_set = tosaled_amber_set,
+};
+
+static struct led_classdev tosa_green_led = {
+ .name = "tosa:green",
+ .default_trigger = "nand-disk",
+ .brightness_set = tosaled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int tosaled_suspend(struct platform_device *dev, pm_message_t state)
+{
+#ifdef CONFIG_LEDS_TRIGGERS
+ if (tosa_amber_led.trigger && strcmp(tosa_amber_led.trigger->name,
+ "sharpsl-charge"))
+#endif
+ led_classdev_suspend(&tosa_amber_led);
+ led_classdev_suspend(&tosa_green_led);
+ return 0;
+}
+
+static int tosaled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&tosa_amber_led);
+ led_classdev_resume(&tosa_green_led);
+ return 0;
+}
+#else
+#define tosaled_suspend NULL
+#define tosaled_resume NULL
+#endif
+
+static int tosaled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &tosa_amber_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &tosa_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&tosa_amber_led);
+
+ return ret;
+}
+
+static int tosaled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&tosa_amber_led);
+ led_classdev_unregister(&tosa_green_led);
+
+ return 0;
+}
+
+static struct platform_driver tosaled_driver = {
+ .probe = tosaled_probe,
+ .remove = tosaled_remove,
+ .suspend = tosaled_suspend,
+ .resume = tosaled_resume,
+ .driver = {
+ .name = "tosa-led",
+ },
+};
+
+static int __init tosaled_init(void)
+{
+ return platform_driver_register(&tosaled_driver);
+}
+
+static void __exit tosaled_exit(void)
+{
+ platform_driver_unregister(&tosaled_driver);
+}
+
+module_init(tosaled_init);
+module_exit(tosaled_exit);
+
+MODULE_AUTHOR("Dirk Opfer ");
+MODULE_DESCRIPTION("Tosa LED driver");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/leds/leds.h b/trunk/drivers/leds/leds.h
new file mode 100644
index 000000000000..a715c4ed93ff
--- /dev/null
+++ b/trunk/drivers/leds/leds.h
@@ -0,0 +1,44 @@
+/*
+ * LED Core
+ *
+ * Copyright 2005 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LEDS_H_INCLUDED
+#define __LEDS_H_INCLUDED
+
+#include
+
+static inline void led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value > LED_FULL)
+ value = LED_FULL;
+ led_cdev->brightness = value;
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness_set(led_cdev, value);
+}
+
+extern rwlock_t leds_list_lock;
+extern struct list_head leds_list;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+void led_trigger_set_default(struct led_classdev *led_cdev);
+void led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
+#else
+#define led_trigger_set_default(x) do {} while(0)
+#define led_trigger_set(x, y) do {} while(0)
+#endif
+
+ssize_t led_trigger_store(struct class_device *dev, const char *buf,
+ size_t count);
+ssize_t led_trigger_show(struct class_device *dev, char *buf);
+
+#endif /* __LEDS_H_INCLUDED */
diff --git a/trunk/drivers/leds/ledtrig-ide-disk.c b/trunk/drivers/leds/ledtrig-ide-disk.c
new file mode 100644
index 000000000000..fa651886ab4f
--- /dev/null
+++ b/trunk/drivers/leds/ledtrig-ide-disk.c
@@ -0,0 +1,62 @@
+/*
+ * LED IDE-Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+static void ledtrig_ide_timerfunc(unsigned long data);
+
+DEFINE_LED_TRIGGER(ledtrig_ide);
+static DEFINE_TIMER(ledtrig_ide_timer, ledtrig_ide_timerfunc, 0, 0);
+static int ide_activity;
+static int ide_lastactivity;
+
+void ledtrig_ide_activity(void)
+{
+ ide_activity++;
+ if (!timer_pending(&ledtrig_ide_timer))
+ mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
+}
+EXPORT_SYMBOL(ledtrig_ide_activity);
+
+static void ledtrig_ide_timerfunc(unsigned long data)
+{
+ if (ide_lastactivity != ide_activity) {
+ ide_lastactivity = ide_activity;
+ led_trigger_event(ledtrig_ide, LED_FULL);
+ mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
+ } else {
+ led_trigger_event(ledtrig_ide, LED_OFF);
+ }
+}
+
+static int __init ledtrig_ide_init(void)
+{
+ led_trigger_register_simple("ide-disk", &ledtrig_ide);
+ return 0;
+}
+
+static void __exit ledtrig_ide_exit(void)
+{
+ led_trigger_unregister_simple(ledtrig_ide);
+}
+
+module_init(ledtrig_ide_init);
+module_exit(ledtrig_ide_exit);
+
+MODULE_AUTHOR("Richard Purdie ");
+MODULE_DESCRIPTION("LED IDE Disk Activity Trigger");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/leds/ledtrig-timer.c b/trunk/drivers/leds/ledtrig-timer.c
new file mode 100644
index 000000000000..f484b5d6dbf8
--- /dev/null
+++ b/trunk/drivers/leds/ledtrig-timer.c
@@ -0,0 +1,170 @@
+/*
+ * LED Kernel Timer Trigger
+ *
+ * Copyright 2005-2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "leds.h"
+
+struct timer_trig_data {
+ unsigned long delay_on; /* milliseconds on */
+ unsigned long delay_off; /* milliseconds off */
+ struct timer_list timer;
+};
+
+static void led_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ unsigned long brightness = LED_OFF;
+ unsigned long delay = timer_data->delay_off;
+
+ if (!timer_data->delay_on || !timer_data->delay_off) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
+ if (!led_cdev->brightness) {
+ brightness = LED_FULL;
+ delay = timer_data->delay_on;
+ }
+
+ led_set_brightness(led_cdev, brightness);
+
+ mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static ssize_t led_delay_on_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ sprintf(buf, "%lu\n", timer_data->delay_on);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ timer_data->delay_on = state;
+ mod_timer(&timer_data->timer, jiffies + 1);
+ ret = after - buf;
+ }
+
+ return ret;
+}
+
+static ssize_t led_delay_off_show(struct class_device *dev, char *buf)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ sprintf(buf, "%lu\n", timer_data->delay_off);
+
+ return strlen(buf) + 1;
+}
+
+static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+ int ret = -EINVAL;
+ char *after;
+ unsigned long state = simple_strtoul(buf, &after, 10);
+
+ if (after - buf > 0) {
+ timer_data->delay_off = state;
+ mod_timer(&timer_data->timer, jiffies + 1);
+ ret = after - buf;
+ }
+
+ return ret;
+}
+
+static CLASS_DEVICE_ATTR(delay_on, 0644, led_delay_on_show,
+ led_delay_on_store);
+static CLASS_DEVICE_ATTR(delay_off, 0644, led_delay_off_show,
+ led_delay_off_store);
+
+static void timer_trig_activate(struct led_classdev *led_cdev)
+{
+ struct timer_trig_data *timer_data;
+
+ timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
+ if (!timer_data)
+ return;
+
+ led_cdev->trigger_data = timer_data;
+
+ init_timer(&timer_data->timer);
+ timer_data->timer.function = led_timer_function;
+ timer_data->timer.data = (unsigned long) led_cdev;
+
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_delay_on);
+ class_device_create_file(led_cdev->class_dev,
+ &class_device_attr_delay_off);
+}
+
+static void timer_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct timer_trig_data *timer_data = led_cdev->trigger_data;
+
+ if (timer_data) {
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_delay_on);
+ class_device_remove_file(led_cdev->class_dev,
+ &class_device_attr_delay_off);
+ del_timer_sync(&timer_data->timer);
+ kfree(timer_data);
+ }
+}
+
+static struct led_trigger timer_led_trigger = {
+ .name = "timer",
+ .activate = timer_trig_activate,
+ .deactivate = timer_trig_deactivate,
+};
+
+static int __init timer_trig_init(void)
+{
+ return led_trigger_register(&timer_led_trigger);
+}
+
+static void __exit timer_trig_exit(void)
+{
+ led_trigger_unregister(&timer_led_trigger);
+}
+
+module_init(timer_trig_init);
+module_exit(timer_trig_exit);
+
+MODULE_AUTHOR("Richard Purdie ");
+MODULE_DESCRIPTION("Timer LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c
index 039e071c1007..1ed5152db450 100644
--- a/trunk/drivers/md/md.c
+++ b/trunk/drivers/md/md.c
@@ -215,13 +215,11 @@ static void mddev_put(mddev_t *mddev)
return;
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
- /* that blocks */
+ spin_unlock(&all_mddevs_lock);
blk_cleanup_queue(mddev->queue);
- /* that also blocks */
kobject_unregister(&mddev->kobj);
- /* result blows... */
- }
- spin_unlock(&all_mddevs_lock);
+ } else
+ spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c
index 3cb0872a845d..9b374c91db66 100644
--- a/trunk/drivers/md/raid1.c
+++ b/trunk/drivers/md/raid1.c
@@ -1135,8 +1135,19 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
mirror = i;
break;
}
- if (!uptodate)
+ if (!uptodate) {
+ int sync_blocks = 0;
+ sector_t s = r1_bio->sector;
+ long sectors_to_go = r1_bio->sectors;
+ /* make sure these bits don't get cleared. */
+ do {
+ bitmap_end_sync(mddev->bitmap, s,
+ &sync_blocks, 1);
+ s += sync_blocks;
+ sectors_to_go -= sync_blocks;
+ } while (sectors_to_go > 0);
md_error(mddev, conf->mirrors[mirror].rdev);
+ }
update_head_pos(mirror, r1_bio);
diff --git a/trunk/drivers/md/raid6main.c b/trunk/drivers/md/raid6main.c
index 6df4930fddec..ab64b37e4996 100644
--- a/trunk/drivers/md/raid6main.c
+++ b/trunk/drivers/md/raid6main.c
@@ -2151,6 +2151,8 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
+ sysfs_create_group(&mddev->kobj, &raid6_attrs_group);
+
mddev->array_size = mddev->size * (mddev->raid_disks - 2);
mddev->queue->unplug_fn = raid6_unplug_device;
diff --git a/trunk/drivers/media/video/cpia_pp.c b/trunk/drivers/media/video/cpia_pp.c
index 3021f21aae36..0b00e6027dfb 100644
--- a/trunk/drivers/media/video/cpia_pp.c
+++ b/trunk/drivers/media/video/cpia_pp.c
@@ -873,7 +873,7 @@ static int __init cpia_pp_setup(char *str)
parport_nr[parport_ptr++] = PPCPIA_PARPORT_NONE;
}
- return 0;
+ return 1;
}
__setup("cpia_pp=", cpia_pp_setup);
diff --git a/trunk/drivers/mtd/chips/amd_flash.c b/trunk/drivers/mtd/chips/amd_flash.c
index fdb91b6f1d97..57115618c496 100644
--- a/trunk/drivers/mtd/chips/amd_flash.c
+++ b/trunk/drivers/mtd/chips/amd_flash.c
@@ -664,7 +664,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
printk("%s: Probing for AMD compatible flash...\n", map->name);
if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
- sizeof(table)/sizeof(table[0])))
+ ARRAY_SIZE(table)))
== -1) {
printk(KERN_WARNING
"%s: Found no AMD compatible device at location zero\n",
@@ -696,7 +696,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
base += (1 << temp.chipshift)) {
int numchips = temp.numchips;
table_pos[numchips] = probe_new_chip(mtd, base, chips,
- &temp, table, sizeof(table)/sizeof(table[0]));
+ &temp, table, ARRAY_SIZE(table));
}
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
diff --git a/trunk/drivers/mtd/chips/jedec_probe.c b/trunk/drivers/mtd/chips/jedec_probe.c
index edb306c03c0a..517ea33e7260 100644
--- a/trunk/drivers/mtd/chips/jedec_probe.c
+++ b/trunk/drivers/mtd/chips/jedec_probe.c
@@ -34,6 +34,7 @@
#define MANUFACTURER_MACRONIX 0x00C2
#define MANUFACTURER_NEC 0x0010
#define MANUFACTURER_PMC 0x009D
+#define MANUFACTURER_SHARP 0x00b0
#define MANUFACTURER_SST 0x00BF
#define MANUFACTURER_ST 0x0020
#define MANUFACTURER_TOSHIBA 0x0098
@@ -124,6 +125,9 @@
#define PM49FL004 0x006E
#define PM49FL008 0x006A
+/* Sharp */
+#define LH28F640BF 0x00b0
+
/* ST - www.st.com */
#define M29W800DT 0x00D7
#define M29W800DB 0x005B
@@ -1267,6 +1271,19 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {
ERASEINFO( 0x01000, 256 )
}
+ }, {
+ .mfr_id = MANUFACTURER_SHARP,
+ .dev_id = LH28F640BF,
+ .name = "LH28F640BF",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x40000,16),
+ }
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF512,
@@ -2035,7 +2052,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
DEBUG(MTD_DEBUG_LEVEL3,
"Search for id:(%02x %02x) interleave(%d) type(%d)\n",
cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
- for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
+ for (i=0; i<ARRAY_SIZE(jedec_table); i++) {
diff --git a/trunk/drivers/mtd/cmdlinepart.c b/trunk/drivers/mtd/cmdlinepart.c
index 6b8bb2e4dcfd..a7a7bfe33879 100644
--- a/trunk/drivers/mtd/cmdlinepart.c
+++ b/trunk/drivers/mtd/cmdlinepart.c
@@ -42,7 +42,8 @@
/* special size referring to all the remaining space in a partition */
-#define SIZE_REMAINING 0xffffffff
+#define SIZE_REMAINING UINT_MAX
+#define OFFSET_CONTINUOUS UINT_MAX
struct cmdline_mtd_partition {
struct cmdline_mtd_partition *next;
@@ -75,7 +76,7 @@ static struct mtd_partition * newpart(char *s,
{
struct mtd_partition *parts;
unsigned long size;
- unsigned long offset = 0;
+ unsigned long offset = OFFSET_CONTINUOUS;
char *name;
int name_len;
unsigned char *extra_mem;
@@ -314,7 +315,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
{
for(i = 0, offset = 0; i < part->num_parts; i++)
{
- if (!part->parts[i].offset)
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
part->parts[i].offset = offset;
else
offset = part->parts[i].offset;
diff --git a/trunk/drivers/mtd/devices/blkmtd.c b/trunk/drivers/mtd/devices/blkmtd.c
index 04f864d238db..79f2e1f23ebd 100644
--- a/trunk/drivers/mtd/devices/blkmtd.c
+++ b/trunk/drivers/mtd/devices/blkmtd.c
@@ -28,8 +28,9 @@
#include
#include
#include
+#include
#include
-
+#include
#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
@@ -46,7 +47,7 @@ struct blkmtd_dev {
struct list_head list;
struct block_device *blkdev;
struct mtd_info mtd_info;
- struct semaphore wrbuf_mutex;
+ struct mutex wrbuf_mutex;
};
@@ -268,7 +269,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
if(end_len)
pagecnt++;
- down(&dev->wrbuf_mutex);
+ mutex_lock(&dev->wrbuf_mutex);
DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
start_len, len, end_len, pagecnt);
@@ -376,7 +377,7 @@ static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
blkmtd_write_out(bio);
DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
- up(&dev->wrbuf_mutex);
+ mutex_unlock(&dev->wrbuf_mutex);
if(retlen)
*retlen = thislen;
@@ -614,8 +615,6 @@ static struct mtd_erase_region_info *calc_erase_regions(
}
-extern dev_t __init name_to_dev_t(const char *line);
-
static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
{
struct block_device *bdev;
@@ -659,7 +658,7 @@ static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size
memset(dev, 0, sizeof(struct blkmtd_dev));
dev->blkdev = bdev;
if(!readonly) {
- init_MUTEX(&dev->wrbuf_mutex);
+ mutex_init(&dev->wrbuf_mutex);
}
dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/trunk/drivers/mtd/devices/block2mtd.c b/trunk/drivers/mtd/devices/block2mtd.c
index 7ff403b2a0a0..4160b8334c53 100644
--- a/trunk/drivers/mtd/devices/block2mtd.c
+++ b/trunk/drivers/mtd/devices/block2mtd.c
@@ -18,6 +18,7 @@
#include
#include
#include
+#include <linux/mutex.h>
#define VERSION "$Revision: 1.30 $"
@@ -31,7 +32,7 @@ struct block2mtd_dev {
struct list_head list;
struct block_device *blkdev;
struct mtd_info mtd;
- struct semaphore write_mutex;
+ struct mutex write_mutex;
};
@@ -134,9 +135,9 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
int err;
instr->state = MTD_ERASING;
- down(&dev->write_mutex);
+ mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
- up(&dev->write_mutex);
+ mutex_unlock(&dev->write_mutex);
if (err) {
ERROR("erase failed err = %d", err);
instr->state = MTD_ERASE_FAILED;
@@ -249,9 +250,9 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to + len > mtd->size)
len = mtd->size - to;
- down(&dev->write_mutex);
+ mutex_lock(&dev->write_mutex);
err = _block2mtd_write(dev, buf, to, len, retlen);
- up(&dev->write_mutex);
+ mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
return err;
@@ -310,7 +311,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
goto devinit_err;
}
- init_MUTEX(&dev->write_mutex);
+ mutex_init(&dev->write_mutex);
/* Setup the MTD structure */
/* make the name contain the block device in */
diff --git a/trunk/drivers/mtd/devices/doc2000.c b/trunk/drivers/mtd/devices/doc2000.c
index e4345cf744a2..23e7a5c7d2c1 100644
--- a/trunk/drivers/mtd/devices/doc2000.c
+++ b/trunk/drivers/mtd/devices/doc2000.c
@@ -20,6 +20,7 @@
#include
#include
#include
+#include <linux/mutex.h>
#include
#include
@@ -605,7 +606,7 @@ static void DoC2k_init(struct mtd_info *mtd)
this->curfloor = -1;
this->curchip = -1;
- init_MUTEX(&this->lock);
+ mutex_init(&this->lock);
/* Ident all the chips present. */
DoC_ScanChips(this, maxchips);
@@ -645,7 +646,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
if (from >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
*retlen = 0;
while (left) {
@@ -774,7 +775,7 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
buf += len;
}
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -803,7 +804,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
if (to >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
*retlen = 0;
while (left) {
@@ -873,7 +874,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EIO;
}
@@ -935,7 +936,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EIO;
}
@@ -956,7 +957,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
if (ret) {
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
}
@@ -966,7 +967,7 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
buf += len;
}
- up(&this->lock);
+ mutex_unlock(&this->lock);
return 0;
}
@@ -975,13 +976,13 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
u_char *eccbuf, struct nand_oobinfo *oobsel)
{
static char static_buf[512];
- static DECLARE_MUTEX(writev_buf_sem);
+ static DEFINE_MUTEX(writev_buf_mutex);
size_t totretlen = 0;
size_t thisvecofs = 0;
int ret= 0;
- down(&writev_buf_sem);
+ mutex_lock(&writev_buf_mutex);
while(count) {
size_t thislen, thisretlen;
@@ -1024,7 +1025,7 @@ static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
to += thislen;
}
- up(&writev_buf_sem);
+ mutex_unlock(&writev_buf_mutex);
*retlen = totretlen;
return ret;
}
@@ -1037,7 +1038,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
int len256 = 0, ret;
struct Nand *mychip;
- down(&this->lock);
+ mutex_lock(&this->lock);
mychip = &this->chips[ofs >> this->chipshift];
@@ -1083,7 +1084,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
ret = DoC_WaitReady(this);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -1197,10 +1198,10 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
struct DiskOnChip *this = mtd->priv;
int ret;
- down(&this->lock);
+ mutex_lock(&this->lock);
ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -1214,10 +1215,10 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
struct Nand *mychip;
int status;
- down(&this->lock);
+ mutex_lock(&this->lock);
if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EINVAL;
}
@@ -1265,7 +1266,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
callback:
mtd_erase_callback(instr);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return 0;
}
diff --git a/trunk/drivers/mtd/devices/lart.c b/trunk/drivers/mtd/devices/lart.c
index 1e876fcb0408..29b0ddaa324e 100644
--- a/trunk/drivers/mtd/devices/lart.c
+++ b/trunk/drivers/mtd/devices/lart.c
@@ -581,8 +581,6 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
/***************************************************************************************************/
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
static struct mtd_info mtd;
static struct mtd_erase_region_info erase_regions[] = {
@@ -640,7 +638,7 @@ int __init lart_flash_init (void)
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
- mtd.numeraseregions = NB_OF (erase_regions);
+ mtd.numeraseregions = ARRAY_SIZE(erase_regions);
mtd.eraseregions = erase_regions;
mtd.erase = flash_erase;
mtd.read = flash_read;
@@ -670,9 +668,9 @@ int __init lart_flash_init (void)
result,mtd.eraseregions[result].numblocks);
#ifdef HAVE_PARTITIONS
- printk ("\npartitions = %d\n",NB_OF (lart_partitions));
+ printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
- for (result = 0; result < NB_OF (lart_partitions); result++)
+ for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
printk (KERN_DEBUG
"\n\n"
"lart_partitions[%d].name = %s\n"
@@ -687,7 +685,7 @@ int __init lart_flash_init (void)
#ifndef HAVE_PARTITIONS
result = add_mtd_device (&mtd);
#else
- result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions));
+ result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
#endif
return (result);
diff --git a/trunk/drivers/mtd/devices/m25p80.c b/trunk/drivers/mtd/devices/m25p80.c
index d5f24089be71..04e65d5dae00 100644
--- a/trunk/drivers/mtd/devices/m25p80.c
+++ b/trunk/drivers/mtd/devices/m25p80.c
@@ -186,7 +186,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
struct m25p *flash = mtd_to_m25p(mtd);
u32 addr,len;
- DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+ DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
flash->spi->dev.bus_id, __FUNCTION__, "at",
(u32)instr->addr, instr->len);
diff --git a/trunk/drivers/mtd/devices/ms02-nv.c b/trunk/drivers/mtd/devices/ms02-nv.c
index 0ff2e4378244..485f663493d2 100644
--- a/trunk/drivers/mtd/devices/ms02-nv.c
+++ b/trunk/drivers/mtd/devices/ms02-nv.c
@@ -308,7 +308,7 @@ static int __init ms02nv_init(void)
break;
}
- for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++)
+ for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
count++;
diff --git a/trunk/drivers/mtd/inftlcore.c b/trunk/drivers/mtd/inftlcore.c
index 8a544890173d..a3b92479719d 100644
--- a/trunk/drivers/mtd/inftlcore.c
+++ b/trunk/drivers/mtd/inftlcore.c
@@ -47,9 +47,6 @@
*/
#define MAX_LOOPS 10000
-extern void INFTL_dumptables(struct INFTLrecord *inftl);
-extern void INFTL_dumpVUchains(struct INFTLrecord *inftl);
-
static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct INFTLrecord *inftl;
@@ -132,7 +129,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
#ifdef PSYCHO_DEBUG
- printk(KERN_INFO "INFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
+ printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
#endif
return;
}
@@ -885,8 +882,6 @@ static struct mtd_blktrans_ops inftl_tr = {
.owner = THIS_MODULE,
};
-extern char inftlmountrev[];
-
static int __init init_inftl(void)
{
printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, "
diff --git a/trunk/drivers/mtd/maps/alchemy-flash.c b/trunk/drivers/mtd/maps/alchemy-flash.c
index a57791a6ce40..b933a2a27b18 100644
--- a/trunk/drivers/mtd/maps/alchemy-flash.c
+++ b/trunk/drivers/mtd/maps/alchemy-flash.c
@@ -126,8 +126,6 @@ static struct mtd_partition alchemy_partitions[] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
static struct mtd_info *mymtd;
int __init alchemy_mtd_init(void)
@@ -154,7 +152,7 @@ int __init alchemy_mtd_init(void)
* Static partition definition selection
*/
parts = alchemy_partitions;
- nb_parts = NB_OF(alchemy_partitions);
+ nb_parts = ARRAY_SIZE(alchemy_partitions);
alchemy_map.size = window_size;
/*
diff --git a/trunk/drivers/mtd/maps/cfi_flagadm.c b/trunk/drivers/mtd/maps/cfi_flagadm.c
index 6a8c0415bde8..fd0f0d3187de 100644
--- a/trunk/drivers/mtd/maps/cfi_flagadm.c
+++ b/trunk/drivers/mtd/maps/cfi_flagadm.c
@@ -86,7 +86,7 @@ struct mtd_partition flagadm_parts[] = {
}
};
-#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition))
+#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
static struct mtd_info *mymtd;
diff --git a/trunk/drivers/mtd/maps/dbox2-flash.c b/trunk/drivers/mtd/maps/dbox2-flash.c
index 49d90542fc75..652813cd6c2d 100644
--- a/trunk/drivers/mtd/maps/dbox2-flash.c
+++ b/trunk/drivers/mtd/maps/dbox2-flash.c
@@ -57,7 +57,7 @@ static struct mtd_partition partition_info[]= {
}
};
-#define NUM_PARTITIONS (sizeof(partition_info) / sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_ADDR 0x10000000
#define WINDOW_SIZE 0x800000
diff --git a/trunk/drivers/mtd/maps/dilnetpc.c b/trunk/drivers/mtd/maps/dilnetpc.c
index efb221692641..c299d10b33e6 100644
--- a/trunk/drivers/mtd/maps/dilnetpc.c
+++ b/trunk/drivers/mtd/maps/dilnetpc.c
@@ -300,7 +300,7 @@ static struct mtd_partition partition_info[]=
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
static struct mtd_info *mymtd;
static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
@@ -345,7 +345,7 @@ static struct mtd_partition higlvl_partition_info[]=
},
};
-#define NUM_HIGHLVL_PARTITIONS (sizeof(higlvl_partition_info)/sizeof(partition_info[0]))
+#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
static int dnp_adnp_probe(void)
diff --git a/trunk/drivers/mtd/maps/dmv182.c b/trunk/drivers/mtd/maps/dmv182.c
index b993ac01a9a5..2bb3c0f0f970 100644
--- a/trunk/drivers/mtd/maps/dmv182.c
+++ b/trunk/drivers/mtd/maps/dmv182.c
@@ -99,7 +99,7 @@ static struct mtd_info *this_mtd;
static int __init init_svme182(void)
{
struct mtd_partition *partitions;
- int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition);
+ int num_parts = ARRAY_SIZE(svme182_partitions);
partitions = svme182_partitions;
diff --git a/trunk/drivers/mtd/maps/h720x-flash.c b/trunk/drivers/mtd/maps/h720x-flash.c
index 319094821101..0667101ccbe1 100644
--- a/trunk/drivers/mtd/maps/h720x-flash.c
+++ b/trunk/drivers/mtd/maps/h720x-flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition h720x_partitions[] = {
}
};
-#define NUM_PARTITIONS (sizeof(h720x_partitions)/sizeof(h720x_partitions[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
static int nr_mtd_parts;
static struct mtd_partition *mtd_parts;
diff --git a/trunk/drivers/mtd/maps/netsc520.c b/trunk/drivers/mtd/maps/netsc520.c
index 33060a315722..ed215470158b 100644
--- a/trunk/drivers/mtd/maps/netsc520.c
+++ b/trunk/drivers/mtd/maps/netsc520.c
@@ -76,7 +76,7 @@ static struct mtd_partition partition_info[]={
.size = 0x80000
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_SIZE 0x00100000
#define WINDOW_ADDR 0x00200000
@@ -88,7 +88,7 @@ static struct map_info netsc520_map = {
.phys = WINDOW_ADDR,
};
-#define NUM_FLASH_BANKS (sizeof(netsc520_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
static struct mtd_info *mymtd;
diff --git a/trunk/drivers/mtd/maps/nettel.c b/trunk/drivers/mtd/maps/nettel.c
index 632eb2aa968f..54a3102ab19a 100644
--- a/trunk/drivers/mtd/maps/nettel.c
+++ b/trunk/drivers/mtd/maps/nettel.c
@@ -128,8 +128,7 @@ static struct mtd_partition nettel_amd_partitions[] = {
}
};
-#define NUM_AMD_PARTITIONS \
- (sizeof(nettel_amd_partitions)/sizeof(nettel_amd_partitions[0]))
+#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
/****************************************************************************/
diff --git a/trunk/drivers/mtd/maps/ocotea.c b/trunk/drivers/mtd/maps/ocotea.c
index c223514ca2eb..a21fcd195ab4 100644
--- a/trunk/drivers/mtd/maps/ocotea.c
+++ b/trunk/drivers/mtd/maps/ocotea.c
@@ -58,8 +58,6 @@ static struct mtd_partition ocotea_large_partitions[] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
int __init init_ocotea(void)
{
u8 fpga0_reg;
@@ -97,7 +95,7 @@ int __init init_ocotea(void)
if (flash) {
flash->owner = THIS_MODULE;
add_mtd_partitions(flash, ocotea_small_partitions,
- NB_OF(ocotea_small_partitions));
+ ARRAY_SIZE(ocotea_small_partitions));
} else {
printk("map probe failed for flash\n");
return -ENXIO;
@@ -118,7 +116,7 @@ int __init init_ocotea(void)
if (flash) {
flash->owner = THIS_MODULE;
add_mtd_partitions(flash, ocotea_large_partitions,
- NB_OF(ocotea_large_partitions));
+ ARRAY_SIZE(ocotea_large_partitions));
} else {
printk("map probe failed for flash\n");
return -ENXIO;
diff --git a/trunk/drivers/mtd/maps/pci.c b/trunk/drivers/mtd/maps/pci.c
index 21822c2edbe4..d2ab1bae9c34 100644
--- a/trunk/drivers/mtd/maps/pci.c
+++ b/trunk/drivers/mtd/maps/pci.c
@@ -334,9 +334,6 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
release:
- if (mtd)
- map_destroy(mtd);
-
if (map) {
map->exit(dev, map);
kfree(map);
diff --git a/trunk/drivers/mtd/maps/pcmciamtd.c b/trunk/drivers/mtd/maps/pcmciamtd.c
index f988c817e196..8bbc751a6021 100644
--- a/trunk/drivers/mtd/maps/pcmciamtd.c
+++ b/trunk/drivers/mtd/maps/pcmciamtd.c
@@ -616,7 +616,7 @@ static void pcmciamtd_config(dev_link_t *link)
} else if(mem_type == 2) {
mtd = do_map_probe("map_rom", &dev->pcmcia_map);
} else {
- for(i = 0; i < sizeof(probes) / sizeof(char *); i++) {
+ for(i = 0; i < ARRAY_SIZE(probes); i++) {
DEBUG(1, "Trying %s", probes[i]);
mtd = do_map_probe(probes[i], &dev->pcmcia_map);
if(mtd)
diff --git a/trunk/drivers/mtd/maps/redwood.c b/trunk/drivers/mtd/maps/redwood.c
index 5b76ed886185..50b14033613f 100644
--- a/trunk/drivers/mtd/maps/redwood.c
+++ b/trunk/drivers/mtd/maps/redwood.c
@@ -121,8 +121,7 @@ struct map_info redwood_flash_map = {
};
-#define NUM_REDWOOD_FLASH_PARTITIONS \
- (sizeof(redwood_flash_partitions)/sizeof(redwood_flash_partitions[0]))
+#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions)
static struct mtd_info *redwood_mtd;
diff --git a/trunk/drivers/mtd/maps/sbc8240.c b/trunk/drivers/mtd/maps/sbc8240.c
index 225cdd9ba5b2..350286dc1d2e 100644
--- a/trunk/drivers/mtd/maps/sbc8240.c
+++ b/trunk/drivers/mtd/maps/sbc8240.c
@@ -66,7 +66,7 @@ static struct map_info sbc8240_map[2] = {
}
};
-#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
/*
* The following defines the partition layout of SBC8240 boards.
@@ -125,8 +125,6 @@ static struct mtd_partition sbc8240_fs_partitions [] = {
}
};
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
/* trivial struct to describe partition information */
struct mtd_part_def
{
@@ -190,10 +188,10 @@ int __init init_sbc8240_mtd (void)
#ifdef CONFIG_MTD_PARTITIONS
sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
sbc8240_part_banks[0].type = "static image";
- sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions);
+ sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
sbc8240_part_banks[1].type = "static file system";
- sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions);
+ sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
for (i = 0; i < NUM_FLASH_BANKS; i++) {
diff --git a/trunk/drivers/mtd/maps/sc520cdp.c b/trunk/drivers/mtd/maps/sc520cdp.c
index ed92afadd8a9..e8c130e1efd3 100644
--- a/trunk/drivers/mtd/maps/sc520cdp.c
+++ b/trunk/drivers/mtd/maps/sc520cdp.c
@@ -107,7 +107,7 @@ static struct map_info sc520cdp_map[] = {
},
};
-#define NUM_FLASH_BANKS (sizeof(sc520cdp_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
static struct mtd_info *mymtd[NUM_FLASH_BANKS];
static struct mtd_info *merged_mtd;
diff --git a/trunk/drivers/mtd/maps/scx200_docflash.c b/trunk/drivers/mtd/maps/scx200_docflash.c
index 2c91dff8bb60..28b8a571a91a 100644
--- a/trunk/drivers/mtd/maps/scx200_docflash.c
+++ b/trunk/drivers/mtd/maps/scx200_docflash.c
@@ -70,7 +70,7 @@ static struct mtd_partition partition_info[] = {
.size = 0x80000
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#endif
diff --git a/trunk/drivers/mtd/maps/sharpsl-flash.c b/trunk/drivers/mtd/maps/sharpsl-flash.c
index 999f4bb3d845..12fe53c0d2fc 100644
--- a/trunk/drivers/mtd/maps/sharpsl-flash.c
+++ b/trunk/drivers/mtd/maps/sharpsl-flash.c
@@ -49,8 +49,6 @@ static struct mtd_partition sharpsl_partitions[1] = {
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
int __init init_sharpsl(void)
{
struct mtd_partition *parts;
@@ -92,7 +90,7 @@ int __init init_sharpsl(void)
}
parts = sharpsl_partitions;
- nb_parts = NB_OF(sharpsl_partitions);
+ nb_parts = ARRAY_SIZE(sharpsl_partitions);
printk(KERN_NOTICE "Using %s partision definition\n", part_type);
add_mtd_partitions(mymtd, parts, nb_parts);
diff --git a/trunk/drivers/mtd/maps/ts5500_flash.c b/trunk/drivers/mtd/maps/ts5500_flash.c
index 4b372bcb17f1..a7422c200567 100644
--- a/trunk/drivers/mtd/maps/ts5500_flash.c
+++ b/trunk/drivers/mtd/maps/ts5500_flash.c
@@ -64,7 +64,7 @@ static struct mtd_partition ts5500_partitions[] = {
}
};
-#define NUM_PARTITIONS (sizeof(ts5500_partitions)/sizeof(struct mtd_partition))
+#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
static struct mtd_info *mymtd;
diff --git a/trunk/drivers/mtd/maps/uclinux.c b/trunk/drivers/mtd/maps/uclinux.c
index 79d92808b766..f7264dc2ac9b 100644
--- a/trunk/drivers/mtd/maps/uclinux.c
+++ b/trunk/drivers/mtd/maps/uclinux.c
@@ -37,7 +37,7 @@ struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
-#define NUM_PARTITIONS (sizeof(uclinux_romfs) / sizeof(uclinux_romfs[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
/****************************************************************************/
diff --git a/trunk/drivers/mtd/maps/vmax301.c b/trunk/drivers/mtd/maps/vmax301.c
index e0063941c0df..b3e487395435 100644
--- a/trunk/drivers/mtd/maps/vmax301.c
+++ b/trunk/drivers/mtd/maps/vmax301.c
@@ -182,7 +182,7 @@ int __init init_vmax301(void)
}
}
- if (!vmax_mtd[1] && !vmax_mtd[2]) {
+ if (!vmax_mtd[0] && !vmax_mtd[1]) {
iounmap((void *)iomapadr);
return -ENXIO;
}
diff --git a/trunk/drivers/mtd/mtd_blkdevs.c b/trunk/drivers/mtd/mtd_blkdevs.c
index 840dd66ce2dc..458d3c8ae1ee 100644
--- a/trunk/drivers/mtd/mtd_blkdevs.c
+++ b/trunk/drivers/mtd/mtd_blkdevs.c
@@ -19,12 +19,12 @@
#include
#include
#include
-#include
+#include
#include
static LIST_HEAD(blktrans_majors);
-extern struct semaphore mtd_table_mutex;
+extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
@@ -122,9 +122,9 @@ static int mtd_blktrans_thread(void *arg)
spin_unlock_irq(rq->queue_lock);
- down(&dev->sem);
+ mutex_lock(&dev->lock);
res = do_blktrans_request(tr, dev, req);
- up(&dev->sem);
+ mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -235,8 +235,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
int last_devnum = -1;
struct gendisk *gd;
- if (!down_trylock(&mtd_table_mutex)) {
- up(&mtd_table_mutex);
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
BUG();
}
@@ -267,7 +267,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return -EBUSY;
}
- init_MUTEX(&new->sem);
+ mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
if (!tr->writesect)
@@ -313,8 +313,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
- if (!down_trylock(&mtd_table_mutex)) {
- up(&mtd_table_mutex);
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
BUG();
}
@@ -378,14 +378,14 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name);
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
spin_lock_init(&tr->blkcore_priv->queue_lock);
@@ -396,7 +396,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!tr->blkcore_priv->rq) {
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return -ENOMEM;
}
@@ -407,7 +407,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -419,7 +419,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->add_mtd(tr, mtd_table[i]);
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -428,7 +428,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
struct list_head *this, *next;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
/* Clean up the kernel thread */
tr->blkcore_priv->exiting = 1;
@@ -446,7 +446,7 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
kfree(tr->blkcore_priv);
diff --git a/trunk/drivers/mtd/mtdblock.c b/trunk/drivers/mtd/mtdblock.c
index e84756644fd1..2cef280e388c 100644
--- a/trunk/drivers/mtd/mtdblock.c
+++ b/trunk/drivers/mtd/mtdblock.c
@@ -19,11 +19,13 @@
#include
#include
+#include <linux/mutex.h>
+
static struct mtdblk_dev {
struct mtd_info *mtd;
int count;
- struct semaphore cache_sem;
+ struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
@@ -284,7 +286,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
mtdblk->count = 1;
mtdblk->mtd = mtd;
- init_MUTEX (&mtdblk->cache_sem);
+ mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
mtdblk->mtd->erasesize) {
@@ -306,9 +308,9 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
- down(&mtdblk->cache_sem);
+ mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
+ mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
@@ -327,9 +329,9 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
- down(&mtdblk->cache_sem);
+ mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
+ mutex_unlock(&mtdblk->cache_mutex);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
diff --git a/trunk/drivers/mtd/mtdcore.c b/trunk/drivers/mtd/mtdcore.c
index dade02ab0687..9905870f56e5 100644
--- a/trunk/drivers/mtd/mtdcore.c
+++ b/trunk/drivers/mtd/mtdcore.c
@@ -19,15 +19,13 @@
#include
#include
#include
-#ifdef CONFIG_PROC_FS
#include
-#endif
#include
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
-DECLARE_MUTEX(mtd_table_mutex);
+DEFINE_MUTEX(mtd_table_mutex);
struct mtd_info *mtd_table[MAX_MTD_DEVICES];
EXPORT_SYMBOL_GPL(mtd_table_mutex);
@@ -49,7 +47,7 @@ int add_mtd_device(struct mtd_info *mtd)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
@@ -67,7 +65,7 @@ int add_mtd_device(struct mtd_info *mtd)
not->add(mtd);
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
@@ -76,7 +74,7 @@ int add_mtd_device(struct mtd_info *mtd)
return 0;
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 1;
}
@@ -94,7 +92,7 @@ int del_mtd_device (struct mtd_info *mtd)
{
int ret;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
if (mtd_table[mtd->index] != mtd) {
ret = -ENODEV;
@@ -118,7 +116,7 @@ int del_mtd_device (struct mtd_info *mtd)
ret = 0;
}
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -135,7 +133,7 @@ void register_mtd_user (struct mtd_notifier *new)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
list_add(&new->list, &mtd_notifiers);
@@ -145,7 +143,7 @@ void register_mtd_user (struct mtd_notifier *new)
if (mtd_table[i])
new->add(mtd_table[i]);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
}
/**
@@ -162,7 +160,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
@@ -171,7 +169,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
old->remove(mtd_table[i]);
list_del(&old->list);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return 0;
}
@@ -193,7 +191,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
struct mtd_info *ret = NULL;
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
if (num == -1) {
for (i=0; i< MAX_MTD_DEVICES; i++)
@@ -211,7 +209,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
if (ret)
ret->usecount++;
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -219,9 +217,9 @@ void put_mtd_device(struct mtd_info *mtd)
{
int c;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
c = --mtd->usecount;
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
BUG_ON(c < 0);
module_put(mtd->owner);
@@ -296,10 +294,11 @@ EXPORT_SYMBOL(unregister_mtd_user);
EXPORT_SYMBOL(default_mtd_writev);
EXPORT_SYMBOL(default_mtd_readv);
+#ifdef CONFIG_PROC_FS
+
/*====================================================================*/
/* Support for /proc/mtd */
-#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_mtd;
static inline int mtd_proc_info (char *buf, int i)
@@ -319,7 +318,7 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
int len, l, i;
off_t begin = 0;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
for (i=0; i< MAX_MTD_DEVICES; i++) {
@@ -337,38 +336,34 @@ static int mtd_read_proc (char *page, char **start, off_t off, int count,
*eof = 1;
done:
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
if (off >= len+begin)
return 0;
*start = page + (off-begin);
return ((count < begin+len-off) ? count : begin+len-off);
}
-#endif /* CONFIG_PROC_FS */
-
/*====================================================================*/
/* Init code */
static int __init init_mtd(void)
{
-#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
proc_mtd->read_proc = mtd_read_proc;
-#endif
return 0;
}
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PROC_FS
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
-#endif
}
module_init(init_mtd);
module_exit(cleanup_mtd);
+#endif /* CONFIG_PROC_FS */
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse ");
diff --git a/trunk/drivers/mtd/nand/Kconfig b/trunk/drivers/mtd/nand/Kconfig
index 1fc4c134d939..cfe288a6e853 100644
--- a/trunk/drivers/mtd/nand/Kconfig
+++ b/trunk/drivers/mtd/nand/Kconfig
@@ -178,17 +178,16 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
Even if you leave this disabled, you can enable BBT writes at module
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
-
- config MTD_NAND_SHARPSL
- bool "Support for NAND Flash on Sharp SL Series (C7xx + others)"
- depends on MTD_NAND && ARCH_PXA
-
- config MTD_NAND_NANDSIM
- bool "Support for NAND Flash Simulator"
- depends on MTD_NAND && MTD_PARTITIONS
+config MTD_NAND_SHARPSL
+ tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+ depends on MTD_NAND && ARCH_PXA
+
+config MTD_NAND_NANDSIM
+ tristate "Support for NAND Flash Simulator"
+ depends on MTD_NAND && MTD_PARTITIONS
help
 The simulator may simulate various NAND flash chips for the
MTD nand layer.
-
+
endmenu
diff --git a/trunk/drivers/mtd/nand/au1550nd.c b/trunk/drivers/mtd/nand/au1550nd.c
index 201e1362da14..bde3550910a2 100644
--- a/trunk/drivers/mtd/nand/au1550nd.c
+++ b/trunk/drivers/mtd/nand/au1550nd.c
@@ -55,8 +55,6 @@ static const struct mtd_partition partition_info[] = {
.size = MTDPART_SIZ_FULL
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
/**
* au_read_byte - read one byte from the chip
@@ -462,7 +460,7 @@ int __init au1xxx_nand_init (void)
}
/* Register the partitions */
- add_mtd_partitions(au1550_mtd, partition_info, NB_OF(partition_info));
+ add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
return 0;
diff --git a/trunk/drivers/mtd/nand/nand_base.c b/trunk/drivers/mtd/nand/nand_base.c
index 5d222460b42a..95e96fa1fceb 100644
--- a/trunk/drivers/mtd/nand/nand_base.c
+++ b/trunk/drivers/mtd/nand/nand_base.c
@@ -80,6 +80,7 @@
#include