diff --git a/[refs] b/[refs]
index 9deba3140474..b42203aecea9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 80ff8a805113850a3ffafcc8e6eaa1fdd51b02f3
+refs/heads/master: 038200cfdc6467fa8100c5b9c3b81730f0158370
diff --git a/trunk/Documentation/00-INDEX b/trunk/Documentation/00-INDEX
index d273b557a934..40ac7759c3bb 100644
--- a/trunk/Documentation/00-INDEX
+++ b/trunk/Documentation/00-INDEX
@@ -126,16 +126,18 @@ devices.txt
- plain ASCII listing of all the nodes in /dev/ with major minor #'s.
digiepca.txt
- info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
+dnotify.txt
+ - info about directory notification in Linux.
dontdiff
- file containing a list of files that should never be diff'ed.
driver-model/
- directory with info about Linux driver model.
+drivers/
+ - directory with driver documentation (currently only EDAC).
dvb/
- info on Linux Digital Video Broadcast (DVB) subsystem.
early-userspace/
- info about initramfs, klibc, and userspace early during boot.
-edac.txt
- - information on EDAC - Error Detection And Correction
eisa.txt
- info on EISA bus support.
exception.txt
@@ -332,8 +334,20 @@ rtc.txt
- notes on how to use the Real Time Clock (aka CMOS clock) driver.
s390/
- directory with info on using Linux on the IBM S390.
-scheduler/
- - directory with info on the scheduler.
+sched-arch.txt
+ - CPU Scheduler implementation hints for architecture specific code.
+sched-coding.txt
+ - reference for various scheduler-related methods in the O(1) scheduler.
+sched-design.txt
+ - goals, design and implementation of the Linux O(1) scheduler.
+sched-design-CFS.txt
+ - goals, design and implementation of the Completely Fair Scheduler.
+sched-domains.txt
+ - information on scheduling domains.
+sched-nice-design.txt
+ - How and why the scheduler's nice levels are implemented.
+sched-stats.txt
+ - information on schedstats (Linux Scheduler Statistics).
scsi/
- directory with info on Linux scsi support.
serial/
@@ -346,6 +360,8 @@ sgi-visws.txt
- short blurb on the SGI Visual Workstations.
sh/
- directory with info on porting Linux to a new architecture.
+sharedsubtree.txt
+ - a description of shared subtrees for namespaces.
smart-config.txt
- description of the Smart Config makefile feature.
sony-laptop.txt
diff --git a/trunk/Documentation/ABI/testing/sysfs-kernel-uids b/trunk/Documentation/ABI/testing/sysfs-kernel-uids
index 28f14695a852..648d65dbc0e7 100644
--- a/trunk/Documentation/ABI/testing/sysfs-kernel-uids
+++ b/trunk/Documentation/ABI/testing/sysfs-kernel-uids
@@ -11,4 +11,4 @@ Description:
example would be, if User A has shares = 1024 and user
B has shares = 2048, User B will get twice the CPU
bandwidth user A will. For more details refer to
- Documentation/scheduler/sched-design-CFS.txt
+ Documentation/sched-design-CFS.txt
diff --git a/trunk/Documentation/BUG-HUNTING b/trunk/Documentation/BUG-HUNTING
index 65022a87bf17..6c816751b868 100644
--- a/trunk/Documentation/BUG-HUNTING
+++ b/trunk/Documentation/BUG-HUNTING
@@ -214,23 +214,6 @@ And recompile the kernel with CONFIG_DEBUG_INFO enabled:
gdb vmlinux
(gdb) p vt_ioctl
(gdb) l *(0x<address of vt_ioctl> + 0xda8)
-or, as one command
- (gdb) l *(vt_ioctl + 0xda8)
-
-If you have a call trace, such as :-
->Call Trace:
-> [] :jbd:log_wait_commit+0xa3/0xf5
-> [] autoremove_wake_function+0x0/0x2e
-> [] :jbd:journal_stop+0x1be/0x1ee
-> ...
-this shows the problem in the :jbd: module. You can load that module in gdb
-and list the relevant code.
- gdb fs/jbd/jbd.ko
- (gdb) p log_wait_commit
- (gdb) l *(0x<address of log_wait_commit> + 0xa3)
-or
- (gdb) l *(log_wait_commit + 0xa3)
-
Another very useful option of the Kernel Hacking section in menuconfig is
Debug memory allocations. This will help you see whether data has been
diff --git a/trunk/Documentation/DocBook/genericirq.tmpl b/trunk/Documentation/DocBook/genericirq.tmpl
index 3a882d9a90a9..4215f69ce7e6 100644
--- a/trunk/Documentation/DocBook/genericirq.tmpl
+++ b/trunk/Documentation/DocBook/genericirq.tmpl
@@ -172,7 +172,7 @@
Chiplevel hardware encapsulation
-
+
Interrupt control flow
Each interrupt is described by an interrupt descriptor structure
@@ -190,7 +190,7 @@
referenced by the assigned chip descriptor structure.
-
+
Highlevel Driver API
The highlevel Driver API consists of following functions:
@@ -210,7 +210,7 @@
See the autogenerated function documentation for details.
-
+
Highlevel IRQ flow handlers
The generic layer provides a set of pre-defined irq-flow methods:
@@ -224,9 +224,9 @@
specific) are assigned to specific interrupts by the architecture
either during bootup or during device initialization.
-
+
Default flow implementations
-
+
Helper functions
The helper functions call the chip primitives and
@@ -267,9 +267,9 @@ noop(irq)
-
+
Default flow handler implementations
-
+
Default Level IRQ flow handler
handle_level_irq provides a generic implementation
@@ -284,7 +284,7 @@ desc->chip->end();
-
+
Default Edge IRQ flow handler
handle_edge_irq provides a generic implementation
@@ -311,7 +311,7 @@ desc->chip->end();
-
+
Default simple IRQ flow handler
handle_simple_irq provides a generic implementation
@@ -328,7 +328,7 @@ handle_IRQ_event(desc->action);
-
+
Default per CPU flow handler
handle_percpu_irq provides a generic implementation
@@ -349,7 +349,7 @@ desc->chip->end();
-
+
Quirks and optimizations
The generic functions are intended for 'clean' architectures and chips,
@@ -358,7 +358,7 @@ desc->chip->end();
overriding the highlevel irq-flow handler.
-
+
Delayed interrupt disable
This per interrupt selectable feature, which was introduced by Russell
@@ -380,7 +380,7 @@ desc->chip->end();
-
+
Chiplevel hardware encapsulation
The chip level hardware descriptor structure irq_chip
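As a rough illustration of the flow-handler assignment described in this section, a platform of this kernel generation would bind an interrupt line to one of the pre-defined flow methods during board setup. This is a minimal sketch: my_irq_chip, the my_*_irq primitives and MY_DEVICE_IRQ are made-up names, while set_irq_chip_and_handler() and handle_level_irq are the real genirq interfaces.

	/* Sketch only: my_* names and MY_DEVICE_IRQ are hypothetical. */
	#include <linux/init.h>
	#include <linux/irq.h>

	static void my_mask_irq(unsigned int irq)   { /* mask in chip register */ }
	static void my_unmask_irq(unsigned int irq) { /* unmask in chip register */ }
	static void my_ack_irq(unsigned int irq)    { /* ack in chip register */ }

	static struct irq_chip my_irq_chip = {
		.name	= "my-chip",
		.mask	= my_mask_irq,
		.unmask	= my_unmask_irq,
		.ack	= my_ack_irq,
	};

	static void __init my_board_init_irq(void)
	{
		/* Level triggered line: let the generic level flow handler
		 * do the mask/ack/unmask sequencing. */
		set_irq_chip_and_handler(MY_DEVICE_IRQ, &my_irq_chip,
					 handle_level_irq);
	}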
diff --git a/trunk/Documentation/DocBook/kernel-api.tmpl b/trunk/Documentation/DocBook/kernel-api.tmpl
index 059aaf20951a..77436d735013 100644
--- a/trunk/Documentation/DocBook/kernel-api.tmpl
+++ b/trunk/Documentation/DocBook/kernel-api.tmpl
@@ -165,7 +165,6 @@ X!Ilib/string.c
!Emm/vmalloc.c
!Imm/page_alloc.c
!Emm/mempool.c
-!Emm/dmapool.c
!Emm/page-writeback.c
!Emm/truncate.c
@@ -372,6 +371,7 @@ X!Iinclude/linux/device.h
!Edrivers/base/class.c
!Edrivers/base/firmware_class.c
!Edrivers/base/transport_class.c
+!Edrivers/base/dmapool.c
-	[ASCII figure: accounting hierarchy of mm_struct, page and page_cgroup -- diagram truncated in this copy]
-
- (Figure 1: Hierarchy of Accounting)
-
-
-Figure 1 shows the important aspects of the controller
-
-1. Accounting happens per cgroup
-2. Each mm_struct knows about which cgroup it belongs to
-3. Each page has a pointer to the page_cgroup, which in turn knows the
- cgroup it belongs to
-
-The accounting is done as follows: mem_cgroup_charge() is invoked to setup
-the necessary data structures and check if the cgroup that is being charged
-is over its limit. If it is then reclaim is invoked on the cgroup.
-More details can be found in the reclaim section of this document.
-If everything goes well, a page meta-data-structure called page_cgroup is
-allocated and associated with the page. This routine also adds the page to
-the per cgroup LRU.
-
-2.2.1 Accounting details
-
-All mapped pages (RSS) and unmapped user pages (Page Cache) are accounted.
-RSS pages are accounted at the time of page_add_*_rmap() unless they've already
-been accounted for earlier. A file page will be accounted for as Page Cache;
-while it's mapped into the page tables of a process, duplicate accounting is
-carefully avoided. Page Cache pages are accounted at the time of
-add_to_page_cache(). The corresponding routines that remove a page from the
-page tables or from the Page Cache are used to decrement the accounting
-counters of the cgroup.
-
-2.3 Shared Page Accounting
-
-Shared pages are accounted on the basis of the first touch approach. The
-cgroup that first touches a page is accounted for the page. The principle
-behind this approach is that a cgroup that aggressively uses a shared
-page will eventually get charged for it (once it is uncharged from
-the cgroup that brought it in -- this will happen on memory pressure).
-
-2.4 Reclaim
-
-Each cgroup maintains a per cgroup LRU that consists of an active
-and inactive list. When a cgroup goes over its limit, we first try
-to reclaim memory from the cgroup so as to make space for the new
-pages that the cgroup has touched. If the reclaim is unsuccessful,
-an OOM routine is invoked to select and kill the bulkiest task in the
-cgroup.
-
-The reclaim algorithm has not been modified for cgroups, except that
-pages that are selected for reclaiming come from the per cgroup LRU
-list.
-
-2. Locking
-
-The memory controller uses the following hierarchy
-
-1. zone->lru_lock is used for selecting pages to be isolated
-2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
-3. lock_page_cgroup() is used to protect page->page_cgroup
-
-3. User Interface
-
-0. Configuration
-
-a. Enable CONFIG_CGROUPS
-b. Enable CONFIG_RESOURCE_COUNTERS
-c. Enable CONFIG_CGROUP_MEM_CONT
-
-1. Prepare the cgroups
-# mkdir -p /cgroups
-# mount -t cgroup none /cgroups -o memory
-
-2. Make the new group and move bash into it
-# mkdir /cgroups/0
-# echo $$ > /cgroups/0/tasks
-
-Since now we're in the 0 cgroup,
-We can alter the memory limit:
-# echo -n 4M > /cgroups/0/memory.limit_in_bytes
-
-NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
-mega or gigabytes.
-
-# cat /cgroups/0/memory.limit_in_bytes
-4194304 Bytes
-
-NOTE: The interface has now changed to display the usage in bytes
-instead of pages
-
-We can check the usage:
-# cat /cgroups/0/memory.usage_in_bytes
-1216512 Bytes
-
-A successful write to this file does not guarantee a successful set of
-this limit to the value written into the file. This can be due to a
-number of factors, such as rounding up to page boundaries or the total
-availability of memory on the system. The user is required to re-read
-this file after a write to see the value actually committed by the kernel.
-
-# echo -n 1 > memory.limit_in_bytes
-# cat memory.limit_in_bytes
-4096 Bytes
-
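Because the kernel may round the written value, a small helper that writes the limit and immediately reads the file back shows what was actually committed. A minimal userspace sketch, reusing the /cgroups/0 mount point from the examples above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/cgroups/0/memory.limit_in_bytes", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "4M", 2) < 0)	/* k/K/m/M/g/G suffixes accepted */
			perror("write");

		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("limit committed by the kernel: %s", buf);
		}
		close(fd);
		return 0;
	}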
-The memory.failcnt field gives the number of times that the cgroup limit was
-exceeded.
-
-The memory.stat file gives accounting information. Currently, the number of
-cached pages, RSS pages and Active/Inactive pages is shown.
-
-The memory.force_empty gives an interface to drop *all* charges by force.
-
-# echo -n 1 > memory.force_empty
-
-will drop all charges in the cgroup. Currently, this is maintained for testing.
-
-4. Testing
-
-Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
-Apart from that v6 has been tested with several applications and regular
-daily use. The controller has also been tested on the PPC64, x86_64 and
-UML platforms.
-
-4.1 Troubleshooting
-
-Sometimes a user might find that the application under a cgroup is
-terminated. There are several causes for this:
-
-1. The cgroup limit is too low (just too low to do anything useful)
-2. The user is using anonymous memory and swap is turned off or too low
-
-A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
-some of the pages cached in the cgroup (page cache pages).
-
-4.2 Task migration
-
-When a task migrates from one cgroup to another, its charge is not
-carried forward. The pages allocated from the original cgroup still
-remain charged to it; the charge is dropped when the page is freed or
-reclaimed.
-
-4.3 Removing a cgroup
-
-A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
-cgroup might have some charge associated with it, even though all
-tasks have migrated away from it. Such charges are automatically dropped at
-rmdir() if there are no tasks.
-
-4.4 Choosing what to account -- Page Cache (unmapped) vs RSS (mapped)?
-
-The type of memory accounted by the cgroup can be limited to just
-mapped pages by writing "1" to memory.control_type field
-
-echo -n 1 > memory.control_type
-
-5. TODO
-
-1. Add support for accounting huge pages (as a separate controller)
-2. Make per-cgroup scanner reclaim not-shared pages first
-3. Teach controller to account for shared-pages
-4. Start reclamation when the limit is lowered
-5. Start reclamation in the background when the limit is
- not yet hit but the usage is getting closer
-
-Summary
-
-Overall, the memory controller has been a stable controller and has been
-commented and discussed quite extensively in the community.
-
-References
-
-1. Singh, Balbir. RFC: Memory Controller, http://lwn.net/Articles/206697/
-2. Singh, Balbir. Memory Controller (RSS Control),
- http://lwn.net/Articles/222762/
-3. Emelianov, Pavel. Resource controllers based on process cgroups
- http://lkml.org/lkml/2007/3/6/198
-4. Emelianov, Pavel. RSS controller based on process cgroups (v2)
- http://lkml.org/lkml/2007/4/9/74
-5. Emelianov, Pavel. RSS controller based on process cgroups (v3)
- http://lkml.org/lkml/2007/5/30/244
-6. Menage, Paul. Control Groups v10, http://lwn.net/Articles/236032/
-7. Vaidyanathan, Srinivasan, Control Groups: Pagecache accounting and control
- subsystem (v3), http://lwn.net/Articles/235534/
-8. Singh, Balbir. RSS controller V2 test results (lmbench),
- http://lkml.org/lkml/2007/5/17/232
-9. Singh, Balbir. RSS controller V2 AIM9 results
- http://lkml.org/lkml/2007/5/18/1
-10. Singh, Balbir. Memory controller v6 results,
- http://lkml.org/lkml/2007/8/19/36
-11. Singh, Balbir. Memory controller v6, http://lkml.org/lkml/2007/8/17/69
-12. Corbet, Jonathan, Controlling memory use in cgroups,
- http://lwn.net/Articles/243795/
diff --git a/trunk/Documentation/cpusets.txt b/trunk/Documentation/cpusets.txt
index 43db6fe12814..141bef1c8599 100644
--- a/trunk/Documentation/cpusets.txt
+++ b/trunk/Documentation/cpusets.txt
@@ -523,14 +523,21 @@ from one cpuset to another, then the kernel will adjust the tasks
memory placement, as above, the next time that the kernel attempts
to allocate a page of memory for that task.
-If a cpuset has its 'cpus' modified, then each task in that cpuset
-will have its allowed CPU placement changed immediately. Similarly,
-if a task's pid is written to a cpuset's 'tasks' file, in either its
-current cpuset or another cpuset, then its allowed CPU placement is
-changed immediately. If such a task had been bound to some subset
-of its cpuset using the sched_setaffinity() call, the task will be
-allowed to run on any CPU allowed in its new cpuset, negating the
-effect of the prior sched_setaffinity() call.
+If a cpuset has its CPUs modified, then each task using that
+cpuset does _not_ change its behavior automatically. In order to
+minimize the impact on the critical scheduling code in the kernel,
+tasks will continue to use their prior CPU placement until they
+are rebound to their cpuset, by rewriting their pid to the 'tasks'
+file of their cpuset. If a task had been bound to some subset of its
+cpuset using the sched_setaffinity() call, and if any of that subset
+is still allowed in its new cpuset settings, then the task will be
+restricted to the intersection of the CPUs it was allowed on before,
+and its new cpuset CPU placement. If, on the other hand, there is
+no overlap between a task's prior placement and its new cpuset CPU
+placement, then the task will be allowed to run on any CPU allowed
+in its new cpuset. If a task is moved from one cpuset to another,
+its CPU placement is updated in the same way as if the task's pid is
+rewritten to the 'tasks' file of its current cpuset.
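For reference, the sched_setaffinity() call mentioned above is made from userspace roughly as follows; a minimal sketch pinning the calling task to CPU 2:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(2, &mask);	/* some subset of the task's cpuset */

		if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
			perror("sched_setaffinity");
			return 1;
		}
		/* If this task's pid is now rewritten to its cpuset's 'tasks'
		 * file, it is restricted to the intersection of this mask and
		 * the cpuset's CPUs, provided that intersection is non-empty. */
		return 0;
	}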
In summary, the memory placement of a task whose cpuset is changed is
updated by the kernel, on the next allocation of a page for that task,
diff --git a/trunk/Documentation/filesystems/dnotify.txt b/trunk/Documentation/dnotify.txt
similarity index 99%
rename from trunk/Documentation/filesystems/dnotify.txt
rename to trunk/Documentation/dnotify.txt
index 9f5d338ddbb8..6984fca6002a 100644
--- a/trunk/Documentation/filesystems/dnotify.txt
+++ b/trunk/Documentation/dnotify.txt
@@ -69,24 +69,24 @@ Example
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
-
+
static volatile int event_fd;
-
+
static void handler(int sig, siginfo_t *si, void *data)
{
event_fd = si->si_fd;
}
-
+
int main(void)
{
struct sigaction act;
int fd;
-
+
act.sa_sigaction = handler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
sigaction(SIGRTMIN + 1, &act, NULL);
-
+
fd = open(".", O_RDONLY);
fcntl(fd, F_SETSIG, SIGRTMIN + 1);
fcntl(fd, F_NOTIFY, DN_MODIFY|DN_CREATE|DN_MULTISHOT);
diff --git a/trunk/Documentation/edac.txt b/trunk/Documentation/drivers/edac/edac.txt
similarity index 100%
rename from trunk/Documentation/edac.txt
rename to trunk/Documentation/drivers/edac/edac.txt
diff --git a/trunk/Documentation/email-clients.txt b/trunk/Documentation/email-clients.txt
index 2ebb94d6ed8e..113165b48305 100644
--- a/trunk/Documentation/email-clients.txt
+++ b/trunk/Documentation/email-clients.txt
@@ -170,6 +170,7 @@ Sylpheed (GUI)
- Works well for inlining text (or using attachments).
- Allows use of an external editor.
+- Not good for IMAP.
- Is slow on large folders.
- Won't do TLS SMTP auth over a non-SSL connection.
- Has a helpful ruler bar in the compose window.
diff --git a/trunk/Documentation/fb/deferred_io.txt b/trunk/Documentation/fb/deferred_io.txt
index 748328370250..63883a892120 100644
--- a/trunk/Documentation/fb/deferred_io.txt
+++ b/trunk/Documentation/fb/deferred_io.txt
@@ -7,10 +7,10 @@ IO. The following example may be a useful explanation of how one such setup
works:
- userspace app like Xfbdev mmaps framebuffer
-- deferred IO and driver sets up fault and page_mkwrite handlers
+- deferred IO and driver sets up nopage and page_mkwrite handlers
- userspace app tries to write to mmaped vaddress
-- we get pagefault and reach fault handler
-- fault handler finds and returns physical page
+- we get pagefault and reach nopage handler
+- nopage handler finds and returns physical page
- we get page_mkwrite where we add this page to a list
- schedule a workqueue task to be run after a delay
- app continues writing to that page with no additional cost. this is
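A driver opts into this mechanism by filling in a struct fb_deferred_io and calling fb_deferred_io_init() before registering the framebuffer. The sketch below uses hypothetical my_fb_* names; the fbdefio field, fb_deferred_io_init() and the page-list walk are the real interfaces of this era.

	#include <linux/fb.h>
	#include <linux/list.h>
	#include <linux/mm.h>

	static void my_fb_write_page(struct fb_info *info, struct page *page);	/* hypothetical */

	static void my_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
	{
		struct page *page;

		/* Called from the delayed work with the list of pages that
		 * were written to since the last flush. */
		list_for_each_entry(page, pagelist, lru)
			my_fb_write_page(info, page);
	}

	static struct fb_deferred_io my_fb_defio = {
		.delay		= HZ / 20,		/* batch writes for ~50ms */
		.deferred_io	= my_fb_deferred_io,
	};

	static int my_fb_setup_defio(struct fb_info *info)
	{
		info->fbdefio = &my_fb_defio;
		fb_deferred_io_init(info);
		return register_framebuffer(info);
	}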
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 17b1659bd3f8..a7d9d179131a 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,14 @@ be removed from this file.
---------------------------
+What: MXSER
+When: December 2007
+Why: The old mxser driver is obsoleted by mxser_new. Give it some more time,
+ then remove it.
+Who: Jiri Slaby
+
+---------------------------
+
What: dev->power.power_state
When: July 2007
Why: Broken design for runtime control over driver power states, confusing
@@ -200,6 +208,13 @@ Who: Randy Dunlap
---------------------------
+What: drivers depending on OSS_OBSOLETE
+When: options in 2.6.23, code in 2.6.25
+Why: obsolete OSS drivers
+Who: Adrian Bunk
+
+---------------------------
+
What: libata spindown skipping and warning
When: Dec 2008
Why: Some halt(8) implementations synchronize caches for and spin
diff --git a/trunk/Documentation/filesystems/00-INDEX b/trunk/Documentation/filesystems/00-INDEX
index e68021c08fbd..1de155e2dc36 100644
--- a/trunk/Documentation/filesystems/00-INDEX
+++ b/trunk/Documentation/filesystems/00-INDEX
@@ -32,8 +32,6 @@ directory-locking
- info about the locking scheme used for directory operations.
dlmfs.txt
- info on the userspace interface to the OCFS2 DLM.
-dnotify.txt
- - info about directory notification in Linux.
ecryptfs.txt
- docs on eCryptfs: stacked cryptographic filesystem for Linux.
ext2.txt
@@ -82,8 +80,6 @@ relay.txt
- info on relay, for efficient streaming from kernel to user space.
romfs.txt
- description of the ROMFS filesystem.
-sharedsubtree.txt
- - a description of shared subtrees for namespaces.
smbfs.txt
- info on using filesystems with the SMB protocol (Win 3.11 and NT).
spufs.txt
diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking
index 42d4b30b1045..37c10cba7177 100644
--- a/trunk/Documentation/filesystems/Locking
+++ b/trunk/Documentation/filesystems/Locking
@@ -90,6 +90,7 @@ of the locking scheme for directory operations.
prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
+ void (*read_inode) (struct inode *);
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
@@ -113,6 +114,7 @@ locking rules:
BKL s_lock s_umount
alloc_inode: no no no
destroy_inode: no
+read_inode: no (see below)
dirty_inode: no (must not sleep)
write_inode: no
put_inode: no
@@ -131,6 +133,7 @@ show_options: no (vfsmount->sem)
quota_read: no no no (see below)
quota_write: no no no (see below)
+->read_inode() is not a method - it's a callback used in iget().
->remount_fs() will have the s_umount lock if it's already mounted.
When called from get_sb_single, it does NOT have the s_umount lock.
->quota_read() and ->quota_write() functions are both guaranteed to
diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting
index 92b888d540a6..0f33c77bc14b 100644
--- a/trunk/Documentation/filesystems/porting
+++ b/trunk/Documentation/filesystems/porting
@@ -34,8 +34,8 @@ FOO_I(inode) (see in-tree filesystems for examples).
Make them ->alloc_inode and ->destroy_inode in your super_operations.
-Keep in mind that now you need explicit initialization of private data
-typically between calling iget_locked() and unlocking the inode.
+Keep in mind that now you need explicit initialization of private data -
+typically in ->read_inode() and after getting an inode from new_inode().
At some point that will become mandatory.
@@ -173,10 +173,10 @@ should be a non-blocking function that initializes those parts of a
newly created inode to allow the test function to succeed. 'data' is
passed as an opaque value to both test and set functions.
-When the inode has been created by iget5_locked(), it will be returned with the
-I_NEW flag set and will still be locked. The filesystem then needs to finalize
-the initialization. Once the inode is initialized it must be unlocked by
-calling unlock_new_inode().
+When the inode has been created by iget5_locked(), it will be returned with
+the I_NEW flag set and will still be locked. read_inode has not been
+called so the file system still has to finalize the initialization. Once
+the inode is initialized it must be unlocked by calling unlock_new_inode().
The filesystem is responsible for setting (and possibly testing) i_ino
when appropriate. There is also a simpler iget_locked function that
@@ -184,19 +184,11 @@ just takes the superblock and inode number as arguments and does the
test and set for you.
e.g.
- inode = iget_locked(sb, ino);
- if (inode->i_state & I_NEW) {
- err = read_inode_from_disk(inode);
- if (err < 0) {
- iget_failed(inode);
- return err;
- }
- unlock_new_inode(inode);
- }
-
-Note that if the process of setting up a new inode fails, then iget_failed()
-should be called on the inode to render it dead, and an appropriate error
-should be passed back to the caller.
+ inode = iget_locked(sb, ino);
+ if (inode->i_state & I_NEW) {
+ read_inode_from_disk(inode);
+ unlock_new_inode(inode);
+ }
---
[recommended]
diff --git a/trunk/Documentation/filesystems/proc.txt b/trunk/Documentation/filesystems/proc.txt
index 5681e2fa1496..e2799b5fafea 100644
--- a/trunk/Documentation/filesystems/proc.txt
+++ b/trunk/Documentation/filesystems/proc.txt
@@ -1029,14 +1029,6 @@ nr_inodes
Denotes the number of inodes the system has allocated. This number will
grow and shrink dynamically.
-nr_open
--------
-
-Denotes the maximum number of file-handles a process can
-allocate. Default value is 1024*1024 (1048576) which should be
-enough for most machines. Actual limit depends on RLIMIT_NOFILE
-resource limit.
-
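The per-process portion of this limit is the RLIMIT_NOFILE resource limit mentioned above, which a program can query with getrlimit(); a minimal sketch:

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
			printf("fd limit: soft=%lu hard=%lu\n",
			       (unsigned long)rl.rlim_cur,
			       (unsigned long)rl.rlim_max);
		return 0;
	}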
nr_free_inodes
--------------
diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt
index bd55038b56f5..9d019d35728f 100644
--- a/trunk/Documentation/filesystems/vfs.txt
+++ b/trunk/Documentation/filesystems/vfs.txt
@@ -203,6 +203,8 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
+ void (*read_inode) (struct inode *);
+
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
@@ -240,6 +242,15 @@ or bottom half).
->alloc_inode was defined and simply undoes anything done by
->alloc_inode.
+ read_inode: this method is called to read a specific inode from the
+ mounted filesystem. The i_ino member in the struct inode is
+ initialized by the VFS to indicate which inode to read. Other
+ members are filled in by this method.
+
+ You can set this to NULL and use iget5_locked() instead of iget()
+ to read inodes. This is necessary for filesystems for which the
+ inode number is not sufficient to identify an inode.
+
dirty_inode: this method is called by the VFS to mark an inode dirty.
write_inode: this method is called when the VFS needs to write an
@@ -297,9 +308,9 @@ or bottom half).
quota_write: called by the VFS to write to filesystem quota file.
-Whoever sets up the inode is responsible for filling in the "i_op" field. This
-is a pointer to a "struct inode_operations" which describes the methods that
-can be performed on individual inodes.
+The read_inode() method is responsible for filling in the "i_op"
+field. This is a pointer to a "struct inode_operations" which
+describes the methods that can be performed on individual inodes.
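To make the read_inode() contract above concrete, a simple disk filesystem of this vintage would implement it roughly as below. The foo_* names, the on-disk foo_raw_inode layout and the foo_get_raw_inode() helper are hypothetical; only the VFS side (i_ino set by the caller, the method filling in i_mode, i_op and friends, make_bad_inode() on error) reflects the interface described here.

	#include <linux/fs.h>

	struct foo_raw_inode { __le16 i_mode; __le32 i_size; };	/* hypothetical on-disk layout */
	static struct foo_raw_inode *foo_get_raw_inode(struct super_block *sb, unsigned long ino);
	static const struct inode_operations foo_inode_operations;
	static const struct file_operations foo_file_operations;

	static void foo_read_inode(struct inode *inode)
	{
		struct foo_raw_inode *raw;

		/* i_ino was filled in by the VFS before calling us. */
		raw = foo_get_raw_inode(inode->i_sb, inode->i_ino);
		if (!raw) {
			make_bad_inode(inode);
			return;
		}

		inode->i_mode = le16_to_cpu(raw->i_mode);
		inode->i_size = le32_to_cpu(raw->i_size);
		inode->i_op   = &foo_inode_operations;	/* read_inode fills in i_op */
		inode->i_fop  = &foo_file_operations;
	}

	static const struct super_operations foo_super_ops = {
		.read_inode	= foo_read_inode,
		/* ... */
	};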
The Inode Object
diff --git a/trunk/Documentation/kprobes.txt b/trunk/Documentation/kprobes.txt
index 30c101761d0d..53a63890aea4 100644
--- a/trunk/Documentation/kprobes.txt
+++ b/trunk/Documentation/kprobes.txt
@@ -96,9 +96,7 @@ or in registers (e.g., for x86_64 or for an i386 fastcall function).
The jprobe will work in either case, so long as the handler's
prototype matches that of the probed function.
-1.3 Return Probes
-
-1.3.1 How Does a Return Probe Work?
+1.3 How Does a Return Probe Work?
When you call register_kretprobe(), Kprobes establishes a kprobe at
the entry to the function. When the probed function is called and this
@@ -109,9 +107,9 @@ At boot time, Kprobes registers a kprobe at the trampoline.
When the probed function executes its return instruction, control
passes to the trampoline and that probe is hit. Kprobes' trampoline
-handler calls the user-specified return handler associated with the
-kretprobe, then sets the saved instruction pointer to the saved return
-address, and that's where execution resumes upon return from the trap.
+handler calls the user-specified handler associated with the kretprobe,
+then sets the saved instruction pointer to the saved return address,
+and that's where execution resumes upon return from the trap.
While the probed function is executing, its return address is
stored in an object of type kretprobe_instance. Before calling
@@ -133,30 +131,6 @@ zero when the return probe is registered, and is incremented every
time the probed function is entered but there is no kretprobe_instance
object available for establishing the return probe.
-1.3.2 Kretprobe entry-handler
-
-Kretprobes also provides an optional user-specified handler which runs
-on function entry. This handler is specified by setting the entry_handler
-field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the
-function entry is hit, the user-defined entry_handler, if any, is invoked.
-If the entry_handler returns 0 (success) then a corresponding return handler
-is guaranteed to be called upon function return. If the entry_handler
-returns a non-zero error then Kprobes leaves the return address as is, and
-the kretprobe has no further effect for that particular function instance.
-
-Multiple entry and return handler invocations are matched using the unique
-kretprobe_instance object associated with them. Additionally, a user
-may also specify per return-instance private data to be part of each
-kretprobe_instance object. This is especially useful when sharing private
-data between corresponding user entry and return handlers. The size of each
-private data object can be specified at kretprobe registration time by
-setting the data_size field of the kretprobe struct. This data can be
-accessed through the data field of each kretprobe_instance object.
-
-In case probed function is entered but there is no kretprobe_instance
-object available, then in addition to incrementing the nmissed count,
-the user entry_handler invocation is also skipped.
-
2. Architectures Supported
Kprobes, jprobes, and return probes are implemented on the following
@@ -300,8 +274,6 @@ of interest:
- ret_addr: the return address
- rp: points to the corresponding kretprobe object
- task: points to the corresponding task struct
-- data: points to per return-instance private data; see "Kretprobe
- entry-handler" for details.
The regs_return_value(regs) macro provides a simple abstraction to
extract the return value from the appropriate register as defined by
@@ -584,52 +556,23 @@ report failed calls to sys_open().
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
-#include <linux/ktime.h>
-
-/* per-instance private data */
-struct my_data {
- ktime_t entry_stamp;
-};
static const char *probed_func = "sys_open";
-/* Timestamp function entry. */
-static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
-{
- struct my_data *data;
-
- if(!current->mm)
- return 1; /* skip kernel threads */
-
- data = (struct my_data *)ri->data;
- data->entry_stamp = ktime_get();
- return 0;
-}
-
-/* If the probed function failed, log the return value and duration.
- * Duration may turn out to be zero consistently, depending upon the
- * granularity of time accounting on the platform. */
-static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+/* Return-probe handler: If the probed function fails, log the return value. */
+static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
int retval = regs_return_value(regs);
- struct my_data *data = (struct my_data *)ri->data;
- s64 delta;
- ktime_t now;
-
if (retval < 0) {
- now = ktime_get();
- delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
- printk("%s: return val = %d (duration = %lld ns)\n",
- probed_func, retval, delta);
+ printk("%s returns %d\n", probed_func, retval);
}
return 0;
}
static struct kretprobe my_kretprobe = {
- .handler = return_handler,
- .entry_handler = entry_handler,
- .data_size = sizeof(struct my_data),
- .maxactive = 20, /* probe up to 20 instances concurrently */
+ .handler = ret_handler,
+ /* Probe up to 20 instances concurrently. */
+ .maxactive = 20
};
static int __init kretprobe_init(void)
@@ -641,7 +584,7 @@ static int __init kretprobe_init(void)
printk("register_kretprobe failed, returned %d\n", ret);
return -1;
}
- printk("Kretprobe active on %s\n", my_kretprobe.kp.symbol_name);
+ printk("Planted return probe at %p\n", my_kretprobe.kp.addr);
return 0;
}
@@ -651,7 +594,7 @@ static void __exit kretprobe_exit(void)
printk("kretprobe unregistered\n");
/* nmissed > 0 suggests that maxactive was set too low. */
printk("Missed probing %d instances of %s\n",
- my_kretprobe.nmissed, probed_func);
+ my_kretprobe.nmissed, probed_func);
}
module_init(kretprobe_init)
diff --git a/trunk/Documentation/kref.txt b/trunk/Documentation/kref.txt
index 130b6e87aa7e..f38b59d00c63 100644
--- a/trunk/Documentation/kref.txt
+++ b/trunk/Documentation/kref.txt
@@ -141,10 +141,10 @@ The last rule (rule 3) is the nastiest one to handle. Say, for
instance, you have a list of items that are each kref-ed, and you wish
to get the first one. You can't just pull the first item off the list
and kref_get() it. That violates rule 3 because you are not already
-holding a valid pointer. You must add a mutex (or some other lock).
-For instance:
+holding a valid pointer. You must add locks or semaphores. For
+instance:
-static DEFINE_MUTEX(mutex);
+static DECLARE_MUTEX(sem);
static LIST_HEAD(q);
struct my_data
{
@@ -155,12 +155,12 @@ struct my_data
static struct my_data *get_entry()
{
struct my_data *entry = NULL;
- mutex_lock(&mutex);
+ down(&sem);
if (!list_empty(&q)) {
entry = container_of(q.next, struct my_data, link);
kref_get(&entry->refcount);
}
- mutex_unlock(&mutex);
+ up(&sem);
return entry;
}
@@ -174,9 +174,9 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- mutex_lock(&mutex);
+ down(&sem);
kref_put(&entry->refcount, release_entry);
- mutex_unlock(&mutex);
+ up(&sem);
}
The kref_put() return value is useful if you do not want to hold the
@@ -191,13 +191,13 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- mutex_lock(&mutex);
+ down(&sem);
if (kref_put(&entry->refcount, release_entry)) {
list_del(&entry->link);
- mutex_unlock(&mutex);
+ up(&sem);
kfree(entry);
} else
- mutex_unlock(&mutex);
+ up(&sem);
}
This is really more useful if you have to call other routines as part
diff --git a/trunk/Documentation/md.txt b/trunk/Documentation/md.txt
index 396cdd982c26..5818628207b5 100644
--- a/trunk/Documentation/md.txt
+++ b/trunk/Documentation/md.txt
@@ -416,16 +416,6 @@ also have
sectors in total that could need to be processed. The two
numbers are separated by a '/' thus effectively showing one
value, a fraction of the process that is complete.
- A 'select' on this attribute will return when resync completes,
- when it reaches the current sync_max (below) and possibly at
- other times.
-
- sync_max
- This is a number of sectors at which point a resync/recovery
- process will pause. When a resync is active, the value can
- only ever be increased, never decreased. The value of 'max'
- effectively disables the limit.
-
sync_speed
This shows the current actual speed, in K/sec, of the current
diff --git a/trunk/Documentation/rtc.txt b/trunk/Documentation/rtc.txt
index 8deffcd68cb8..e20b19c1b60d 100644
--- a/trunk/Documentation/rtc.txt
+++ b/trunk/Documentation/rtc.txt
@@ -182,8 +182,8 @@ driver returns ENOIOCTLCMD. Some common examples:
since the frequency is stored in the irq_freq member of the rtc_device
structure. Your driver needs to initialize the irq_freq member during
init. Make sure you check the requested frequency is in range of your
- hardware in the irq_set_freq function. If it isn't, return -EINVAL. If
- you cannot actually change the frequency, do not define irq_set_freq.
+ hardware in the irq_set_freq function. If you cannot actually change
+ the frequency, just return -ENOTTY.
If all else fails, check out the rtc-test.c driver!
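Sketched against the rtc_class_ops interface of this kernel generation, an irq_set_freq implementation following the rules above might look like this; the foo_rtc_* names and the register write are hypothetical, and returning -EINVAL for an out-of-range request is an assumption.

	struct foo_rtc;					/* hypothetical driver state */
	static void foo_rtc_write_rate(struct foo_rtc *foo, int freq);

	static int foo_rtc_irq_set_freq(struct device *dev, int freq)
	{
		struct foo_rtc *foo = dev_get_drvdata(dev);

		/* Reject frequencies the hardware cannot generate. */
		if (freq < 2 || freq > 8192 || !is_power_of_2(freq))
			return -EINVAL;

		foo_rtc_write_rate(foo, freq);	/* hypothetical register write */
		return 0;
	}

	static const struct rtc_class_ops foo_rtc_ops = {
		/* Leave .irq_set_freq unset, or return -ENOTTY from it, when
		 * the periodic interrupt frequency is fixed in hardware. */
		.irq_set_freq	= foo_rtc_irq_set_freq,
	};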
@@ -268,8 +268,8 @@ int main(int argc, char **argv)
/* This read will block */
retval = read(fd, &data, sizeof(unsigned long));
if (retval == -1) {
- perror("read");
- exit(errno);
+ perror("read");
+ exit(errno);
}
fprintf(stderr, " %d",i);
fflush(stderr);
@@ -326,11 +326,11 @@ test_READ:
rtc_tm.tm_sec %= 60;
rtc_tm.tm_min++;
}
- if (rtc_tm.tm_min == 60) {
+ if (rtc_tm.tm_min == 60) {
rtc_tm.tm_min = 0;
rtc_tm.tm_hour++;
}
- if (rtc_tm.tm_hour == 24)
+ if (rtc_tm.tm_hour == 24)
rtc_tm.tm_hour = 0;
retval = ioctl(fd, RTC_ALM_SET, &rtc_tm);
@@ -407,8 +407,8 @@ test_PIE:
"\n...Periodic IRQ rate is fixed\n");
goto done;
}
- perror("RTC_IRQP_SET ioctl");
- exit(errno);
+ perror("RTC_IRQP_SET ioctl");
+ exit(errno);
}
fprintf(stderr, "\n%ldHz:\t", tmp);
@@ -417,27 +417,27 @@ test_PIE:
/* Enable periodic interrupts */
retval = ioctl(fd, RTC_PIE_ON, 0);
if (retval == -1) {
- perror("RTC_PIE_ON ioctl");
- exit(errno);
+ perror("RTC_PIE_ON ioctl");
+ exit(errno);
}
for (i=1; i<21; i++) {
- /* This blocks */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- fprintf(stderr, " %d",i);
- fflush(stderr);
- irqcount++;
+ /* This blocks */
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+ fprintf(stderr, " %d",i);
+ fflush(stderr);
+ irqcount++;
}
/* Disable periodic interrupts */
retval = ioctl(fd, RTC_PIE_OFF, 0);
if (retval == -1) {
- perror("RTC_PIE_OFF ioctl");
- exit(errno);
+ perror("RTC_PIE_OFF ioctl");
+ exit(errno);
}
}
diff --git a/trunk/Documentation/scheduler/sched-arch.txt b/trunk/Documentation/sched-arch.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-arch.txt
rename to trunk/Documentation/sched-arch.txt
diff --git a/trunk/Documentation/scheduler/sched-coding.txt b/trunk/Documentation/sched-coding.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-coding.txt
rename to trunk/Documentation/sched-coding.txt
diff --git a/trunk/Documentation/scheduler/sched-design-CFS.txt b/trunk/Documentation/sched-design-CFS.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-design-CFS.txt
rename to trunk/Documentation/sched-design-CFS.txt
diff --git a/trunk/Documentation/scheduler/sched-design.txt b/trunk/Documentation/sched-design.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-design.txt
rename to trunk/Documentation/sched-design.txt
diff --git a/trunk/Documentation/scheduler/sched-domains.txt b/trunk/Documentation/sched-domains.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-domains.txt
rename to trunk/Documentation/sched-domains.txt
diff --git a/trunk/Documentation/scheduler/sched-nice-design.txt b/trunk/Documentation/sched-nice-design.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-nice-design.txt
rename to trunk/Documentation/sched-nice-design.txt
diff --git a/trunk/Documentation/scheduler/sched-stats.txt b/trunk/Documentation/sched-stats.txt
similarity index 100%
rename from trunk/Documentation/scheduler/sched-stats.txt
rename to trunk/Documentation/sched-stats.txt
diff --git a/trunk/Documentation/scheduler/00-INDEX b/trunk/Documentation/scheduler/00-INDEX
deleted file mode 100644
index b5f5ca069b2d..000000000000
--- a/trunk/Documentation/scheduler/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file.
-sched-arch.txt
- - CPU Scheduler implementation hints for architecture specific code.
-sched-coding.txt
- - reference for various scheduler-related methods in the O(1) scheduler.
-sched-design.txt
- - goals, design and implementation of the Linux O(1) scheduler.
-sched-design-CFS.txt
- - goals, design and implementation of the Completely Fair Scheduler.
-sched-domains.txt
- - information on scheduling domains.
-sched-nice-design.txt
- - How and why the scheduler's nice levels are implemented.
-sched-stats.txt
- - information on schedstats (Linux Scheduler Statistics).
diff --git a/trunk/Documentation/filesystems/sharedsubtree.txt b/trunk/Documentation/sharedsubtree.txt
similarity index 100%
rename from trunk/Documentation/filesystems/sharedsubtree.txt
rename to trunk/Documentation/sharedsubtree.txt
diff --git a/trunk/Documentation/sysctl/fs.txt b/trunk/Documentation/sysctl/fs.txt
index f99254327ae5..aa986a35e994 100644
--- a/trunk/Documentation/sysctl/fs.txt
+++ b/trunk/Documentation/sysctl/fs.txt
@@ -23,7 +23,6 @@ Currently, these files are in /proc/sys/fs:
- inode-max
- inode-nr
- inode-state
-- nr_open
- overflowuid
- overflowgid
- suid_dumpable
@@ -92,15 +91,6 @@ usage of file handles and you don't need to increase the maximum.
==============================================================
-nr_open:
-
-This denotes the maximum number of file-handles a process can
-allocate. Default value is 1024*1024 (1048576) which should be
-enough for most machines. Actual limit depends on RLIMIT_NOFILE
-resource limit.
-
-==============================================================
-
inode-max, inode-nr & inode-state:
As with file handles, the kernel allocates the inode structures
diff --git a/trunk/Documentation/sysctl/vm.txt b/trunk/Documentation/sysctl/vm.txt
index 8a4863c4edd4..24eac1bc735d 100644
--- a/trunk/Documentation/sysctl/vm.txt
+++ b/trunk/Documentation/sysctl/vm.txt
@@ -32,7 +32,6 @@ Currently, these files are in /proc/sys/vm:
- min_unmapped_ratio
- min_slab_ratio
- panic_on_oom
-- oom_dump_tasks
- oom_kill_allocating_task
- mmap_min_address
- numa_zonelist_order
@@ -233,27 +232,6 @@ according to your policy of failover.
=============================================================
-oom_dump_tasks
-
-Enables a system-wide task dump (excluding kernel threads) to be
-produced when the kernel performs an OOM-killing and includes such
-information as pid, uid, tgid, vm size, rss, cpu, oom_adj score, and
-name. This is helpful to determine why the OOM killer was invoked
-and to identify the rogue task that caused it.
-
-If this is set to zero, this information is suppressed. On very
-large systems with thousands of tasks it may not be feasible to dump
-the memory state information for each one. Such systems should not
-be forced to incur a performance penalty in OOM conditions when the
-information may not be desired.
-
-If this is set to non-zero, this information is shown whenever the
-OOM killer actually kills a memory-hogging task.
-
-The default value is 0.
-
-=============================================================
-
oom_kill_allocating_task
This enables or disables killing the OOM-triggering task in
diff --git a/trunk/Documentation/unaligned-memory-access.txt b/trunk/Documentation/unaligned-memory-access.txt
deleted file mode 100644
index 6223eace3c09..000000000000
--- a/trunk/Documentation/unaligned-memory-access.txt
+++ /dev/null
@@ -1,226 +0,0 @@
-UNALIGNED MEMORY ACCESSES
-=========================
-
-Linux runs on a wide variety of architectures which have varying behaviour
-when it comes to memory access. This document presents some details about
-unaligned accesses, why you need to write code that doesn't cause them,
-and how to write such code!
-
-
-The definition of an unaligned access
-=====================================
-
-Unaligned memory accesses occur when you try to read N bytes of data starting
-from an address that is not evenly divisible by N (i.e. addr % N != 0).
-For example, reading 4 bytes of data from address 0x10004 is fine, but
-reading 4 bytes of data from address 0x10005 would be an unaligned memory
-access.
-
-The above may seem a little vague, as memory access can happen in different
-ways. The context here is at the machine code level: certain instructions read
-or write a number of bytes to or from memory (e.g. movb, movw, movl in x86
-assembly). As will become clear, it is relatively easy to spot C statements
-which will compile to multiple-byte memory access instructions, namely when
-dealing with types such as u16, u32 and u64.
-
-
-Natural alignment
-=================
-
-The rule mentioned above forms what we refer to as natural alignment:
-When accessing N bytes of memory, the base memory address must be evenly
-divisible by N, i.e. addr % N == 0.
-
-When writing code, assume the target architecture has natural alignment
-requirements.
-
-In reality, only a few architectures require natural alignment on all sizes
-of memory access. However, we must consider ALL supported architectures;
-writing code that satisfies natural alignment requirements is the easiest way
-to achieve full portability.
-
-
-Why unaligned access is bad
-===========================
-
-The effects of performing an unaligned memory access vary from architecture
-to architecture. It would be easy to write a whole document on the differences
-here; a summary of the common scenarios is presented below:
-
- - Some architectures are able to perform unaligned memory accesses
- transparently, but there is usually a significant performance cost.
- - Some architectures raise processor exceptions when unaligned accesses
- happen. The exception handler is able to correct the unaligned access,
- at significant cost to performance.
- - Some architectures raise processor exceptions when unaligned accesses
- happen, but the exceptions do not contain enough information for the
- unaligned access to be corrected.
- - Some architectures are not capable of unaligned memory access, but will
- silently perform a different memory access to the one that was requested,
- resulting in a subtle code bug that is hard to detect!
-
-It should be obvious from the above that if your code causes unaligned
-memory accesses to happen, your code will not work correctly on certain
-platforms and will cause performance problems on others.
-
-
-Code that does not cause unaligned access
-=========================================
-
-At first, the concepts above may seem a little hard to relate to actual
-coding practice. After all, you don't have a great deal of control over
-memory addresses of certain variables, etc.
-
-Fortunately things are not too complex, as in most cases, the compiler
-ensures that things will work for you. For example, take the following
-structure:
-
- struct foo {
- u16 field1;
- u32 field2;
- u8 field3;
- };
-
-Let us assume that an instance of the above structure resides in memory
-starting at address 0x10000. With a basic level of understanding, it would
-not be unreasonable to expect that accessing field2 would cause an unaligned
-access. You'd be expecting field2 to be located at offset 2 bytes into the
-structure, i.e. address 0x10002, but that address is not evenly divisible
-by 4 (remember, we're reading a 4 byte value here).
-
-Fortunately, the compiler understands the alignment constraints, so in the
-above case it would insert 2 bytes of padding in between field1 and field2.
-Therefore, for standard structure types you can always rely on the compiler
-to pad structures so that accesses to fields are suitably aligned (assuming
-you do not cast the field to a type of different length).
-
-Similarly, you can also rely on the compiler to align variables and function
-parameters to a naturally aligned scheme, based on the size of the type of
-the variable.
-
-At this point, it should be clear that accessing a single byte (u8 or char)
-will never cause an unaligned access, because all memory addresses are evenly
-divisible by one.
-
-On a related topic, with the above considerations in mind you may observe
-that you could reorder the fields in the structure in order to place fields
-where padding would otherwise be inserted, and hence reduce the overall
-resident memory size of structure instances. The optimal layout of the
-above example is:
-
- struct foo {
- u32 field2;
- u16 field1;
- u8 field3;
- };
-
-For a natural alignment scheme, the compiler would only have to add a single
-byte of padding at the end of the structure. This padding is added in order
-to satisfy alignment constraints for arrays of these structures.
-
-Another point worth mentioning is the use of __attribute__((packed)) on a
-structure type. This GCC-specific attribute tells the compiler never to
-insert any padding within structures, useful when you want to use a C struct
-to represent some data that comes in a fixed arrangement 'off the wire'.
-
-You might be inclined to believe that usage of this attribute can easily
-lead to unaligned accesses when accessing fields that do not satisfy
-architectural alignment requirements. However, again, the compiler is aware
-of the alignment constraints and will generate extra instructions to perform
-the memory access in a way that does not cause unaligned access. Of course,
-the extra instructions obviously cause a loss in performance compared to the
-non-packed case, so the packed attribute should only be used when avoiding
-structure padding is of importance.
-
-
-Code that causes unaligned access
-=================================
-
-With the above in mind, let's move onto a real life example of a function
-that can cause an unaligned memory access. The following function adapted
-from include/linux/etherdevice.h is an optimized routine to compare two
-ethernet MAC addresses for equality.
-
-unsigned int compare_ether_addr(const u8 *addr1, const u8 *addr2)
-{
- const u16 *a = (const u16 *) addr1;
- const u16 *b = (const u16 *) addr2;
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
-}
-
-In the above function, the reference to a[0] causes 2 bytes (16 bits) to
-be read from memory starting at address addr1. Think about what would happen
-if addr1 was an odd address such as 0x10003. (Hint: it'd be an unaligned
-access.)
-
-Despite the potential unaligned access problems with the above function, it
-is included in the kernel anyway but is understood to only work on
-16-bit-aligned addresses. It is up to the caller to ensure this alignment or
-not use this function at all. This alignment-unsafe function is still useful
-as it is a decent optimization for the cases when you can ensure alignment,
-which is true almost all of the time in ethernet networking context.
-
-
-Here is another example of some code that could cause unaligned accesses:
- void myfunc(u8 *data, u32 value)
- {
- [...]
- *((u32 *) data) = cpu_to_le32(value);
- [...]
- }
-
-This code will cause unaligned accesses every time the data parameter points
-to an address that is not evenly divisible by 4.
-
-In summary, the 2 main scenarios where you may run into unaligned access
-problems involve:
- 1. Casting variables to types of different lengths
- 2. Pointer arithmetic followed by access to at least 2 bytes of data
-
-
-Avoiding unaligned accesses
-===========================
-
-The easiest way to avoid unaligned access is to use the get_unaligned() and
-put_unaligned() macros provided by the <asm/unaligned.h> header file.
-
-Going back to an earlier example of code that potentially causes unaligned
-access:
-
- void myfunc(u8 *data, u32 value)
- {
- [...]
- *((u32 *) data) = cpu_to_le32(value);
- [...]
- }
-
-To avoid the unaligned memory access, you would rewrite it as follows:
-
- void myfunc(u8 *data, u32 value)
- {
- [...]
- value = cpu_to_le32(value);
- put_unaligned(value, (u32 *) data);
- [...]
- }
-
-The get_unaligned() macro works similarly. Assuming 'data' is a pointer to
-memory and you wish to avoid unaligned access, its usage is as follows:
-
- u32 value = get_unaligned((u32 *) data);
-
-These macros work for memory accesses of any length (not just 32 bits as
-in the examples above). Be aware that when compared to standard access of
-aligned memory, using these macros to access unaligned memory can be costly in
-terms of performance.
-
-If use of such macros is not convenient, another option is to use memcpy(),
-where the source or destination (or both) are of type u8* or unsigned char*.
-Due to the byte-wise nature of this operation, unaligned accesses are avoided.
-
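For completeness, the memcpy()-based variant mentioned above would look roughly like this for the earlier myfunc() example:

	void myfunc(u8 *data, u32 value)
	{
		value = cpu_to_le32(value);
		/* byte-wise copy, so no multi-byte access at 'data' */
		memcpy(data, &value, sizeof(value));
	}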
---
-Author: Daniel Drake
-With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt,
-Johannes Berg, Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock,
-Uli Kunitz, Vadim Lobanov
-
diff --git a/trunk/Documentation/w1/masters/00-INDEX b/trunk/Documentation/w1/masters/00-INDEX
index 7b0ceaaad7af..752613c4cea2 100644
--- a/trunk/Documentation/w1/masters/00-INDEX
+++ b/trunk/Documentation/w1/masters/00-INDEX
@@ -4,5 +4,3 @@ ds2482
- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
ds2490
- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
-w1-gpio
- - GPIO 1-wire bus master driver.
diff --git a/trunk/Documentation/w1/masters/w1-gpio b/trunk/Documentation/w1/masters/w1-gpio
deleted file mode 100644
index af5d3b4aa851..000000000000
--- a/trunk/Documentation/w1/masters/w1-gpio
+++ /dev/null
@@ -1,33 +0,0 @@
-Kernel driver w1-gpio
-=====================
-
-Author: Ville Syrjala
-
-
-Description
------------
-
-GPIO 1-wire bus master driver. The driver uses the GPIO API to control the
-wire and the GPIO pin can be specified using platform data.
-
-
-Example (mach-at91)
--------------------
-
-#include <linux/w1-gpio.h>
-
-static struct w1_gpio_platform_data foo_w1_gpio_pdata = {
- .pin = AT91_PIN_PB20,
- .is_open_drain = 1,
-};
-
-static struct platform_device foo_w1_device = {
- .name = "w1-gpio",
- .id = -1,
- .dev.platform_data = &foo_w1_gpio_pdata,
-};
-
-...
- at91_set_GPIO_periph(foo_w1_gpio_pdata.pin, 1);
- at91_set_multi_drive(foo_w1_gpio_pdata.pin, 1);
- platform_device_register(&foo_w1_device);
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index a372c86fd07b..4f3da8b56979 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -338,12 +338,13 @@ S: Maintained for 2.4; PCI support for 2.6.
AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
P: Thomas Dahlmann
M: thomas.dahlmann@amd.com
-L: info-linux@geode.amd.com (subscribers-only)
+L: info-linux@geode.amd.com
S: Supported
AMD GEODE PROCESSOR/CHIPSET SUPPORT
P: Jordan Crouse
-L: info-linux@geode.amd.com (subscribers-only)
+M: info-linux@geode.amd.com
+L: info-linux@geode.amd.com
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@@ -840,12 +841,6 @@ L: linux-kernel@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
S: Maintained
-BLOCK2MTD DRIVER
-P: Joern Engel
-M: joern@lazybastard.org
-L: linux-mtd@lists.infradead.org
-S: Maintained
-
BLUETOOTH SUBSYSTEM
P: Marcel Holtmann
M: marcel@holtmann.org
@@ -1371,11 +1366,6 @@ W: http://linuxtv.org/
T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
S: Maintained
-DZ DECSTATION DZ11 SERIAL DRIVER
-P: Maciej W. Rozycki
-M: macro@linux-mips.org
-S: Maintained
-
EATA-DMA SCSI DRIVER
P: Michael Neuffer
L: linux-eata@i-connect.net, linux-scsi@vger.kernel.org
@@ -3041,8 +3031,8 @@ L: linux-abi-devel@lists.sourceforge.net
S: Maintained
PHRAM MTD DRIVER
-P: Joern Engel
-M: joern@lazybastard.org
+P: Jörn Engel
+M: joern@wh.fh-wedel.de
L: linux-mtd@lists.infradead.org
S: Maintained
@@ -3229,12 +3219,6 @@ M: mporter@kernel.crashing.org
L: linux-kernel@vger.kernel.org
S: Maintained
-RDC R-321X SoC
-P: Florian Fainelli
-M: florian.fainelli@telecomint.eu
-L: linux-kernel@vger.kernel.org
-S: Maintained
-
RDC R6040 FAST ETHERNET DRIVER
P: Florian Fainelli
M: florian.fainelli@telecomint.eu
@@ -3872,12 +3856,6 @@ M: oliver@neukum.name
L: linux-usb@vger.kernel.org
S: Maintained
-USB AUERSWALD DRIVER
-P: Wolfgang Muees
-M: wolfgang@iksw-muees.de
-L: linux-usb@vger.kernel.org
-S: Maintained
-
USB BLOCK DRIVER (UB ub)
P: Pete Zaitcev
M: zaitcev@redhat.com
@@ -4028,6 +4006,12 @@ S: Maintained
W: http://geocities.com/i0xox0i
W: http://firstlight.net/cvs
+USB AUERSWALD DRIVER
+P: Wolfgang Muees
+M: wolfgang@iksw-muees.de
+L: linux-usb@vger.kernel.org
+S: Maintained
+
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
P: Gary Brubaker
M: xavyer@ix.netcom.com
diff --git a/trunk/REPORTING-BUGS b/trunk/REPORTING-BUGS
index ab0c56630a8c..ac02e42a2627 100644
--- a/trunk/REPORTING-BUGS
+++ b/trunk/REPORTING-BUGS
@@ -10,12 +10,11 @@ bug report. This explains what you should do with the "Oops" information
to make it useful to the recipient.
Send the output to the maintainer of the kernel area that seems to
-be involved with the problem, and cc the relevant mailing list. Don't
-worry too much about getting the wrong person. If you are unsure send it
-to the person responsible for the code relevant to what you were doing.
-If it occurs repeatably try and describe how to recreate it. That is
-worth even more than the oops itself. The list of maintainers and
-mailing lists is in the MAINTAINERS file in this directory.
+be involved with the problem. Don't worry too much about getting the
+wrong person. If you are unsure send it to the person responsible for the
+code relevant to what you were doing. If it occurs repeatably try and
+describe how to recreate it. That is worth even more than the oops itself.
+The list of maintainers is in the MAINTAINERS file in this directory.
If it is a security bug, please copy the Security Contact listed
in the MAINTAINERS file. They can help coordinate bugfix and disclosure.
diff --git a/trunk/arch/alpha/Kconfig.debug b/trunk/arch/alpha/Kconfig.debug
index 3f6265f2d9d4..f45f28cc10da 100644
--- a/trunk/arch/alpha/Kconfig.debug
+++ b/trunk/arch/alpha/Kconfig.debug
@@ -7,6 +7,15 @@ config EARLY_PRINTK
depends on ALPHA_GENERIC || ALPHA_SRM
default y
+config DEBUG_RWLOCK
+ bool "Read-write spinlock debugging"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here then read-write lock processing will count how many
+ times it has tried to get the lock and issue an error message after
+ too many attempts. If you suspect a rwlock problem or a kernel
+ hacker asks for this option then say Y. Otherwise say N.
+
config ALPHA_LEGACY_START_ADDRESS
bool "Legacy kernel start address"
depends on ALPHA_GENERIC
diff --git a/trunk/arch/alpha/defconfig b/trunk/arch/alpha/defconfig
index e43f68fd66b0..6da9c3dbde44 100644
--- a/trunk/arch/alpha/defconfig
+++ b/trunk/arch/alpha/defconfig
@@ -882,6 +882,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_SPINLOCK is not set
CONFIG_DEBUG_INFO=y
CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_RWLOCK is not set
# CONFIG_DEBUG_SEMAPHORE is not set
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
CONFIG_MATHEMU=y
diff --git a/trunk/arch/alpha/kernel/core_irongate.c b/trunk/arch/alpha/kernel/core_irongate.c
index a872078497be..e4a0bcf1d28b 100644
--- a/trunk/arch/alpha/kernel/core_irongate.c
+++ b/trunk/arch/alpha/kernel/core_irongate.c
@@ -241,8 +241,7 @@ albacore_init_arch(void)
size / 1024);
}
#endif
- reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop -
- pci_mem, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem);
printk("irongate_init_arch: temporarily reserving "
"region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
}
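
This hunk is the first of many in the patch that drop the third argument from reserve_bootmem()/reserve_bootmem_node(): the flags parameter (BOOTMEM_DEFAULT here, BOOTMEM_EXCLUSIVE in the sh crashkernel hunk further down) disappears and only (address, size) remains. A hypothetical compatibility shim, not part of the patch, just to make the signature change explicit:

/* Hypothetical shim, not in the patch: maps a caller written against the
 * three-argument form (addr, size, flags) onto the two-argument
 * reserve_bootmem() that these hunks restore.  The flag name comes from
 * the removed lines; the wrapper and its fallback value are assumptions. */
#include <linux/bootmem.h>

#ifndef BOOTMEM_DEFAULT
#define BOOTMEM_DEFAULT	0	/* assumed value, for the sketch only */
#endif

static inline void reserve_bootmem_compat(unsigned long addr,
					  unsigned long size,
					  int flags)
{
	(void)flags;			/* the restored API has no flags */
	reserve_bootmem(addr, size);	/* two-argument form */
}
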
diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c
index 72f9a619a66d..6413c5f23226 100644
--- a/trunk/arch/alpha/kernel/osf_sys.c
+++ b/trunk/arch/alpha/kernel/osf_sys.c
@@ -430,7 +430,7 @@ sys_getpagesize(void)
asmlinkage unsigned long
sys_getdtablesize(void)
{
- return sysctl_nr_open;
+ return NR_OPEN;
}
/*
diff --git a/trunk/arch/alpha/kernel/setup.c b/trunk/arch/alpha/kernel/setup.c
index 74c346625658..beff6297f788 100644
--- a/trunk/arch/alpha/kernel/setup.c
+++ b/trunk/arch/alpha/kernel/setup.c
@@ -428,8 +428,7 @@ setup_memory(void *kernel_end)
}
/* Reserve the bootmap memory. */
- reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
#ifdef CONFIG_BLK_DEV_INITRD
@@ -447,7 +446,7 @@ setup_memory(void *kernel_end)
phys_to_virt(PFN_PHYS(max_low_pfn)));
} else {
reserve_bootmem(virt_to_phys((void *)initrd_start),
- INITRD_SIZE, BOOTMEM_DEFAULT);
+ INITRD_SIZE);
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/trunk/arch/alpha/kernel/smp.c b/trunk/arch/alpha/kernel/smp.c
index 63c2073401ee..f4ab233201b2 100644
--- a/trunk/arch/alpha/kernel/smp.c
+++ b/trunk/arch/alpha/kernel/smp.c
@@ -77,6 +77,10 @@ int smp_num_probed; /* Internal processor count */
int smp_num_cpus = 1; /* Number that came online. */
EXPORT_SYMBOL(smp_num_cpus);
+extern void calibrate_delay(void);
+
+
+
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
diff --git a/trunk/arch/alpha/mm/numa.c b/trunk/arch/alpha/mm/numa.c
index 10ab7833e83c..e3e3806a6f25 100644
--- a/trunk/arch/alpha/mm/numa.c
+++ b/trunk/arch/alpha/mm/numa.c
@@ -242,8 +242,7 @@ setup_memory_node(int nid, void *kernel_end)
}
/* Reserve the bootmap memory. */
- reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start),
- bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
node_set_online(nid);
@@ -282,7 +281,7 @@ setup_memory(void *kernel_end)
nid = kvaddr_to_nid(initrd_start);
reserve_bootmem_node(NODE_DATA(nid),
virt_to_phys((void *)initrd_start),
- INITRD_SIZE, BOOTMEM_DEFAULT);
+ INITRD_SIZE);
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/trunk/arch/arm/mach-at91/board-sam9261ek.c b/trunk/arch/arm/mach-at91/board-sam9261ek.c
index 0ce38dfa6ebe..aa29ea58ca09 100644
--- a/trunk/arch/arm/mach-at91/board-sam9261ek.c
+++ b/trunk/arch/arm/mach-at91/board-sam9261ek.c
@@ -383,7 +383,6 @@ static void at91_lcdc_tft_power_control(int on)
}
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
- .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9261_DEFAULT_TFT_LCDCON2,
diff --git a/trunk/arch/arm/mach-at91/board-sam9263ek.c b/trunk/arch/arm/mach-at91/board-sam9263ek.c
index 38313abef657..f09347a86e71 100644
--- a/trunk/arch/arm/mach-at91/board-sam9263ek.c
+++ b/trunk/arch/arm/mach-at91/board-sam9263ek.c
@@ -253,7 +253,6 @@ static void at91_lcdc_power_control(int on)
/* Driver datas */
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
- .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9263_DEFAULT_LCDCON2,
diff --git a/trunk/arch/arm/mach-rpc/riscpc.c b/trunk/arch/arm/mach-rpc/riscpc.c
index eca558c6bf5d..a454451c97c3 100644
--- a/trunk/arch/arm/mach-rpc/riscpc.c
+++ b/trunk/arch/arm/mach-rpc/riscpc.c
@@ -17,7 +17,7 @@
#include
#include
#include
-#include
+#include
#include
#include
diff --git a/trunk/arch/arm/mm/init.c b/trunk/arch/arm/mm/init.c
index ec00f26bffa4..c0ad7c0fbae0 100644
--- a/trunk/arch/arm/mm/init.c
+++ b/trunk/arch/arm/mm/init.c
@@ -239,7 +239,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
* Reserve the bootmem bitmap for this node.
*/
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
- boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ boot_pages << PAGE_SHIFT);
#ifdef CONFIG_BLK_DEV_INITRD
/*
@@ -247,7 +247,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
*/
if (node == initrd_node) {
reserve_bootmem_node(pgdat, phys_initrd_start,
- phys_initrd_size, BOOTMEM_DEFAULT);
+ phys_initrd_size);
initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index d41a75ed3dce..e5d61ee3d4a1 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -605,11 +605,9 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* Note that this can only be in node 0.
*/
#ifdef CONFIG_XIP_KERNEL
- reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
- reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif
/*
@@ -617,7 +615,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
- PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
+ PTRS_PER_PGD * sizeof(pgd_t));
/*
* Hmm... This should go elsewhere, but we really really need to
@@ -640,10 +638,8 @@ void __init reserve_node_zero(pg_data_t *pgdat)
/* H1940 and RX3715 need to reserve this for suspend */
if (machine_is_h1940() || machine_is_rx3715()) {
- reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
- BOOTMEM_DEFAULT);
- reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, 0x30003000, 0x1000);
+ reserve_bootmem_node(pgdat, 0x30081000, 0x1000);
}
#ifdef CONFIG_SA1111
@@ -654,8 +650,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
if (res_size)
- reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}
/*
diff --git a/trunk/arch/arm/mm/nommu.c b/trunk/arch/arm/mm/nommu.c
index 63c62fdea521..8cd3a60954f0 100644
--- a/trunk/arch/arm/mm/nommu.c
+++ b/trunk/arch/arm/mm/nommu.c
@@ -27,11 +27,9 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* Note that this can only be in node 0.
*/
#ifdef CONFIG_XIP_KERNEL
- reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
- reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif
/*
@@ -39,8 +37,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* some architectures which the DRAM is the exception vector to trap,
* alloc_page breaks with error, although it is not NULL, but "0."
*/
- reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
}
/*
diff --git a/trunk/arch/arm/plat-omap/fb.c b/trunk/arch/arm/plat-omap/fb.c
index 7854f19b77cf..ee40c1a0b83d 100644
--- a/trunk/arch/arm/plat-omap/fb.c
+++ b/trunk/arch/arm/plat-omap/fb.c
@@ -207,7 +207,7 @@ void __init omapfb_reserve_sdram(void)
return;
}
if (rg.paddr)
- reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
+ reserve_bootmem(rg.paddr, rg.size);
reserved += rg.size;
omapfb_config.mem_desc.region[i] = rg;
configured_regions++;
diff --git a/trunk/arch/avr32/kernel/setup.c b/trunk/arch/avr32/kernel/setup.c
index e66a07a928cd..4b4c1884e1c5 100644
--- a/trunk/arch/avr32/kernel/setup.c
+++ b/trunk/arch/avr32/kernel/setup.c
@@ -489,8 +489,7 @@ static void __init setup_bootmem(void)
/* Reserve space for the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
- bootmap_size,
- BOOTMEM_DEFAULT);
+ bootmap_size);
/* ...and any other reserved regions. */
for (res = reserved; res; res = res->sibling) {
@@ -506,8 +505,7 @@ static void __init setup_bootmem(void)
&& res->end < PFN_PHYS(max_pfn))
reserve_bootmem_node(
NODE_DATA(node), res->start,
- res->end - res->start + 1,
- BOOTMEM_DEFAULT);
+ res->end - res->start + 1);
}
node_set_online(node);
diff --git a/trunk/arch/avr32/lib/delay.c b/trunk/arch/avr32/lib/delay.c
index 9aa8800830f3..b3bc0b56e2c6 100644
--- a/trunk/arch/avr32/lib/delay.c
+++ b/trunk/arch/avr32/lib/delay.c
@@ -12,15 +12,13 @@
#include
#include
-#include
#include
#include
-#include
#include
#include
-int __devinit read_current_timer(unsigned long *timer_value)
+int read_current_timer(unsigned long *timer_value)
{
*timer_value = sysreg_read(COUNT);
return 0;
diff --git a/trunk/arch/blackfin/kernel/setup.c b/trunk/arch/blackfin/kernel/setup.c
index 6e106b3d7729..462cae893757 100644
--- a/trunk/arch/blackfin/kernel/setup.c
+++ b/trunk/arch/blackfin/kernel/setup.c
@@ -406,7 +406,7 @@ void __init setup_arch(char **cmdline_p)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(memory_start, bootmap_size);
/*
* get kmalloc into gear
*/
diff --git a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
index 1795aab79064..f8c411a24af7 100644
--- a/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/trunk/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -37,7 +37,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf533/boards/H8606.c b/trunk/arch/blackfin/mach-bf533/boards/H8606.c
index 97378b0a9753..a72c7a620fa1 100644
--- a/trunk/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/trunk/arch/blackfin/mach-bf533/boards/H8606.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c b/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 886f260d9359..21df2f375497 100644
--- a/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/trunk/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -34,7 +34,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf533/boards/ezkit.c b/trunk/arch/blackfin/mach-bf533/boards/ezkit.c
index 4026c2f3ab4e..c37dd45c8803 100644
--- a/trunk/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/trunk/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -35,7 +35,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf533/boards/stamp.c b/trunk/arch/blackfin/mach-bf533/boards/stamp.c
index 0185350feacc..ac52b040b336 100644
--- a/trunk/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/trunk/arch/blackfin/mach-bf533/boards/stamp.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c b/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c
index f7c1f964f13b..8703b67d5ec6 100644
--- a/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/trunk/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -36,7 +36,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf537/boards/generic_board.c b/trunk/arch/blackfin/mach-bf537/boards/generic_board.c
index 8a3397db1d21..3e52f3f5bd58 100644
--- a/trunk/arch/blackfin/mach-bf537/boards/generic_board.c
+++ b/trunk/arch/blackfin/mach-bf537/boards/generic_board.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf537/boards/minotaur.c b/trunk/arch/blackfin/mach-bf537/boards/minotaur.c
index d71e0be33921..b8bbba85af53 100644
--- a/trunk/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/trunk/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -10,7 +10,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf537/boards/stamp.c b/trunk/arch/blackfin/mach-bf537/boards/stamp.c
index 119e6ea83384..772541548b76 100644
--- a/trunk/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/trunk/arch/blackfin/mach-bf537/boards/stamp.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include
#endif
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c b/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c
index bf9e738a7c64..3a79a9061bdc 100644
--- a/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/trunk/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -34,7 +34,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/blackfin/mach-bf561/boards/ezkit.c b/trunk/arch/blackfin/mach-bf561/boards/ezkit.c
index ed863ce9a2d8..7601c3be1b5c 100644
--- a/trunk/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/trunk/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -35,7 +35,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/cris/kernel/setup.c b/trunk/arch/cris/kernel/setup.c
index 4da042e100a0..65466c49d7a9 100644
--- a/trunk/arch/cris/kernel/setup.c
+++ b/trunk/arch/cris/kernel/setup.c
@@ -137,7 +137,7 @@ setup_arch(char **cmdline_p)
* Arguments are start, size
*/
- reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);
/* paging_init() sets up the MMU and marks all pages as reserved */
diff --git a/trunk/arch/frv/kernel/setup.c b/trunk/arch/frv/kernel/setup.c
index 6c01464db699..a74c08786b21 100644
--- a/trunk/arch/frv/kernel/setup.c
+++ b/trunk/arch/frv/kernel/setup.c
@@ -708,7 +708,7 @@ static void __init reserve_dma_coherent(void)
/*
* calibrate the delay loop
*/
-void __cpuinit calibrate_delay(void)
+void __init calibrate_delay(void)
{
loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
@@ -925,15 +925,13 @@ static void __init setup_linux_memory(void)
#endif
/* take back the memory occupied by the kernel image and the bootmem alloc map */
- reserve_bootmem(kstart, kend - kstart + bootmap_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(kstart, kend - kstart + bootmap_size);
/* reserve the memory occupied by the initial ramdisk */
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(INITRD_START, INITRD_SIZE);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
}
@@ -988,10 +986,9 @@ static void __init setup_uclinux_memory(void)
/* now take back the bits the core kernel is occupying */
#ifndef CONFIG_PROTECT_KERNEL
- reserve_bootmem(kend, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(kend, bootmap_size);
reserve_bootmem((unsigned long) &__kernel_image_start,
- kend - (unsigned long) &__kernel_image_start,
- BOOTMEM_DEFAULT);
+ kend - (unsigned long) &__kernel_image_start);
#else
dampr = __get_DAMPR(0);
@@ -999,15 +996,14 @@ static void __init setup_uclinux_memory(void)
dampr = (dampr >> 4) + 17;
dampr = 1 << dampr;
- reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr, BOOTMEM_DEFAULT);
+ reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr);
#endif
/* reserve some memory to do uncached DMA through if requested */
#ifdef CONFIG_RESERVE_DMA_COHERENT
if (dma_coherent_mem_start)
reserve_bootmem(dma_coherent_mem_start,
- dma_coherent_mem_end - dma_coherent_mem_start,
- BOOTMEM_DEFAULT);
+ dma_coherent_mem_end - dma_coherent_mem_start);
#endif
} /* end setup_uclinux_memory() */
diff --git a/trunk/arch/h8300/kernel/irq.c b/trunk/arch/h8300/kernel/irq.c
index 5a1b4cfea05b..8dec4dd57b4e 100644
--- a/trunk/arch/h8300/kernel/irq.c
+++ b/trunk/arch/h8300/kernel/irq.c
@@ -14,7 +14,6 @@
#include
#include
#include
-#include
#include
#include
diff --git a/trunk/arch/h8300/kernel/setup.c b/trunk/arch/h8300/kernel/setup.c
index cd3734614d9d..b2e86d0255e6 100644
--- a/trunk/arch/h8300/kernel/setup.c
+++ b/trunk/arch/h8300/kernel/setup.c
@@ -173,7 +173,7 @@ void __init setup_arch(char **cmdline_p)
* the bootmem bitmap so we then reserve it after freeing it :-)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(memory_start, bootmap_size);
/*
* get kmalloc into gear
*/
diff --git a/trunk/arch/ia64/kernel/machine_kexec.c b/trunk/arch/ia64/kernel/machine_kexec.c
index 0823de1f6ebe..d6cd45f4c6c7 100644
--- a/trunk/arch/ia64/kernel/machine_kexec.c
+++ b/trunk/arch/ia64/kernel/machine_kexec.c
@@ -129,14 +129,13 @@ void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM)
+#if defined(CONFIG_ARCH_DISCONTIGMEM_ENABLE) && defined(CONFIG_NUMA)
VMCOREINFO_SYMBOL(pgdat_list);
VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
-#endif
-#ifdef CONFIG_NUMA
+
VMCOREINFO_SYMBOL(node_memblk);
VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS);
- VMCOREINFO_STRUCT_SIZE(node_memblk_s);
+ VMCOREINFO_SIZE(node_memblk_s);
VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
VMCOREINFO_OFFSET(node_memblk_s, size);
#endif
diff --git a/trunk/arch/ia64/kernel/smpboot.c b/trunk/arch/ia64/kernel/smpboot.c
index 32ee5979a042..480b1a5085d5 100644
--- a/trunk/arch/ia64/kernel/smpboot.c
+++ b/trunk/arch/ia64/kernel/smpboot.c
@@ -120,6 +120,7 @@ static volatile unsigned long go[SLAVE + 1];
#define DEBUG_ITC_SYNC 0
+extern void __devinit calibrate_delay (void);
extern void start_ap (void);
extern unsigned long ia64_iobase;
@@ -476,7 +477,7 @@ start_secondary (void *unused)
return 0;
}
-struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
return NULL;
}
diff --git a/trunk/arch/ia64/mm/contig.c b/trunk/arch/ia64/mm/contig.c
index 344f64eca7a9..7e9c275ea148 100644
--- a/trunk/arch/ia64/mm/contig.c
+++ b/trunk/arch/ia64/mm/contig.c
@@ -218,7 +218,7 @@ find_memory (void)
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
- reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(bootmap_start, bootmap_size);
find_initrd();
diff --git a/trunk/arch/ia64/mm/discontig.c b/trunk/arch/ia64/mm/discontig.c
index ee5e68b2af94..0b567398f38e 100644
--- a/trunk/arch/ia64/mm/discontig.c
+++ b/trunk/arch/ia64/mm/discontig.c
@@ -299,12 +299,12 @@ static void __init reserve_pernode_space(void)
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
base = __pa(bdp->node_bootmem_map);
- reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pdp, base, size);
/* Now the per-node space */
size = mem_data[node].pernode_size;
base = __pa(mem_data[node].pernode_addr);
- reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pdp, base, size);
}
}
diff --git a/trunk/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/trunk/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 2c676cc05418..ab3eaf85fe4d 100644
--- a/trunk/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/trunk/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -100,11 +100,11 @@ u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
- struct pcibus_info *soft = arg;
+ struct pcibus_info *soft = (struct pcibus_info *)arg;
- if (sal_pcibr_error_interrupt(soft) < 0)
+ if (sal_pcibr_error_interrupt(soft) < 0) {
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
-
+ }
return IRQ_HANDLED;
}
diff --git a/trunk/arch/m32r/kernel/setup.c b/trunk/arch/m32r/kernel/setup.c
index f1f5db0c4084..d64814385d70 100644
--- a/trunk/arch/m32r/kernel/setup.c
+++ b/trunk/arch/m32r/kernel/setup.c
@@ -177,28 +177,25 @@ static unsigned long __init setup_memory(void)
*/
reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
- - CONFIG_MEMORY_START,
- BOOTMEM_DEFAULT);
+ - CONFIG_MEMORY_START);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE);
/*
* reserve memory hole
*/
#ifdef CONFIG_MEMHOLE
- reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(INITRD_START, INITRD_SIZE);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
diff --git a/trunk/arch/m32r/kernel/smpboot.c b/trunk/arch/m32r/kernel/smpboot.c
index 2c03ac1d005f..0e383da158e9 100644
--- a/trunk/arch/m32r/kernel/smpboot.c
+++ b/trunk/arch/m32r/kernel/smpboot.c
@@ -43,7 +43,6 @@
#include
#include
#include
-#include
#include
#include
#include
diff --git a/trunk/arch/m32r/mm/discontig.c b/trunk/arch/m32r/mm/discontig.c
index 07c1af7dc0e2..c7efdb0aefc5 100644
--- a/trunk/arch/m32r/mm/discontig.c
+++ b/trunk/arch/m32r/mm/discontig.c
@@ -91,8 +91,7 @@ unsigned long __init setup_memory(void)
PFN_PHYS(mp->pages));
reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
- PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
- BOOTMEM_DEFAULT);
+ PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);
if (max_low_pfn < max_pfn)
max_low_pfn = max_pfn;
@@ -105,7 +104,7 @@ unsigned long __init setup_memory(void)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START,
- INITRD_SIZE, BOOTMEM_DEFAULT);
+ INITRD_SIZE);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
diff --git a/trunk/arch/m68k/amiga/chipram.c b/trunk/arch/m68k/amiga/chipram.c
index cbe36538af47..d10726f9038b 100644
--- a/trunk/arch/m68k/amiga/chipram.c
+++ b/trunk/arch/m68k/amiga/chipram.c
@@ -32,10 +32,12 @@ void __init amiga_chip_init(void)
if (!AMIGAHW_PRESENT(CHIP_RAM))
return;
+#ifndef CONFIG_APUS_FAST_EXCEPT
/*
* Remove the first 4 pages where PPC exception handlers will be located
*/
amiga_chip_size -= 0x4000;
+#endif
chipram_res.end = amiga_chip_size-1;
request_resource(&iomem_resource, &chipram_res);
diff --git a/trunk/arch/m68k/amiga/cia.c b/trunk/arch/m68k/amiga/cia.c
index 343fab49bd9a..c4a4ffd45bc0 100644
--- a/trunk/arch/m68k/amiga/cia.c
+++ b/trunk/arch/m68k/amiga/cia.c
@@ -84,7 +84,7 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
static irqreturn_t cia_handler(int irq, void *dev_id)
{
- struct ciabase *base = dev_id;
+ struct ciabase *base = (struct ciabase *)dev_id;
int mach_irq;
unsigned char ints;
diff --git a/trunk/arch/m68k/atari/stram.c b/trunk/arch/m68k/atari/stram.c
index 0055a6c06f75..8dda6515887a 100644
--- a/trunk/arch/m68k/atari/stram.c
+++ b/trunk/arch/m68k/atari/stram.c
@@ -154,7 +154,7 @@ void __init atari_stram_reserve_pages(void *start_mem)
/* always reserve first page of ST-RAM, the first 2 kB are
* supervisor-only! */
if (!kernel_in_stram)
- reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem (0, PAGE_SIZE);
}
diff --git a/trunk/arch/m68k/kernel/setup.c b/trunk/arch/m68k/kernel/setup.c
index 9a06c48edcb3..ed3a4caec620 100644
--- a/trunk/arch/m68k/kernel/setup.c
+++ b/trunk/arch/m68k/kernel/setup.c
@@ -323,8 +323,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
- m68k_ramdisk.addr, m68k_ramdisk.size,
- BOOTMEM_DEFAULT);
+ m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/trunk/arch/m68knommu/kernel/setup.c b/trunk/arch/m68knommu/kernel/setup.c
index 156c6c662c7e..81507c53d4a9 100644
--- a/trunk/arch/m68knommu/kernel/setup.c
+++ b/trunk/arch/m68knommu/kernel/setup.c
@@ -203,7 +203,7 @@ void __init setup_arch(char **cmdline_p)
* the bootmem bitmap so we then reserve it after freeing it :-)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(memory_start, bootmap_size);
/*
* Get kmalloc into gear.
diff --git a/trunk/arch/m68knommu/lib/memcpy.c b/trunk/arch/m68knommu/lib/memcpy.c
index b50dbcad4746..0d5577569e4c 100644
--- a/trunk/arch/m68knommu/lib/memcpy.c
+++ b/trunk/arch/m68knommu/lib/memcpy.c
@@ -1,5 +1,6 @@
#include
+#include
void * memcpy(void * to, const void * from, size_t n)
{
diff --git a/trunk/arch/mips/au1000/common/gpio.c b/trunk/arch/mips/au1000/common/gpio.c
index 0b658f1db4ce..8527856aec45 100644
--- a/trunk/arch/mips/au1000/common/gpio.c
+++ b/trunk/arch/mips/au1000/common/gpio.c
@@ -27,6 +27,7 @@
* others have a second one : GPIO2
*/
+#include
#include
#include
#include
diff --git a/trunk/arch/mips/kernel/setup.c b/trunk/arch/mips/kernel/setup.c
index 39f3dfe134fb..c032409cba9b 100644
--- a/trunk/arch/mips/kernel/setup.c
+++ b/trunk/arch/mips/kernel/setup.c
@@ -232,7 +232,7 @@ static void __init finalize_initrd(void)
goto disable;
}
- reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
+ reserve_bootmem(__pa(initrd_start), size);
initrd_below_start_ok = 1;
printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -413,7 +413,7 @@ static void __init bootmem_init(void)
/*
* Reserve the bootmap memory.
*/
- reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(PFN_PHYS(mapstart), bootmap_size);
/*
* Reserve initrd memory if needed.
diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c
index 9d41dab90a80..1e5dfc28294a 100644
--- a/trunk/arch/mips/kernel/smp.c
+++ b/trunk/arch/mips/kernel/smp.c
@@ -52,6 +52,7 @@ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
+extern void __init calibrate_delay(void);
extern void cpu_idle(void);
/* Number of TCs (or siblings in Intel speak) per CPU core */
diff --git a/trunk/arch/mips/kernel/sysirix.c b/trunk/arch/mips/kernel/sysirix.c
index 22fd41e946b2..4c477c7ff74a 100644
--- a/trunk/arch/mips/kernel/sysirix.c
+++ b/trunk/arch/mips/kernel/sysirix.c
@@ -356,7 +356,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
retval = NGROUPS_MAX;
goto out;
case 5:
- retval = sysctl_nr_open;
+ retval = NR_OPEN;
goto out;
case 6:
retval = 1;
diff --git a/trunk/arch/mips/sgi-ip27/ip27-memory.c b/trunk/arch/mips/sgi-ip27/ip27-memory.c
index bf438d02366e..e5e023f50a07 100644
--- a/trunk/arch/mips/sgi-ip27/ip27-memory.c
+++ b/trunk/arch/mips/sgi-ip27/ip27-memory.c
@@ -465,8 +465,7 @@ static void __init node_mem_init(cnodeid_t node)
free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
- ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
- BOOTMEM_DEFAULT);
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
}
/*
diff --git a/trunk/arch/parisc/Kconfig.debug b/trunk/arch/parisc/Kconfig.debug
index bc989e522a04..9166bd117267 100644
--- a/trunk/arch/parisc/Kconfig.debug
+++ b/trunk/arch/parisc/Kconfig.debug
@@ -2,6 +2,15 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config DEBUG_RWLOCK
+ bool "Read-write spinlock debugging"
+ depends on DEBUG_KERNEL && SMP
+ help
+ If you say Y here then read-write lock processing will count how many
+ times it has tried to get the lock and issue an error message after
+ too many attempts. If you suspect a rwlock problem or a kernel
+ hacker asks for this option then say Y. Otherwise say N.
+
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
depends on DEBUG_KERNEL
diff --git a/trunk/arch/parisc/configs/a500_defconfig b/trunk/arch/parisc/configs/a500_defconfig
index ddacc72e38fb..ea071218a3ed 100644
--- a/trunk/arch/parisc/configs/a500_defconfig
+++ b/trunk/arch/parisc/configs/a500_defconfig
@@ -1050,6 +1050,7 @@ CONFIG_SCHED_DEBUG=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_DEBUG_RWLOCK is not set
# CONFIG_DEBUG_RODATA is not set
#
diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c
index eb80f5e33d7d..aa875fa43488 100644
--- a/trunk/arch/parisc/mm/init.c
+++ b/trunk/arch/parisc/mm/init.c
@@ -315,13 +315,11 @@ static void __init setup_bootmem(void)
#define PDC_CONSOLE_IO_IODC_SIZE 32768
reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free +
- PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
+ (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
- (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
+ (unsigned long)(_end - _text));
reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
#ifndef CONFIG_DISCONTIGMEM
@@ -330,8 +328,7 @@ static void __init setup_bootmem(void)
for (i = 0; i < npmem_holes; i++) {
reserve_bootmem_node(NODE_DATA(0),
(pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ (pmem_holes[i].pages << PAGE_SHIFT));
}
#endif
@@ -349,8 +346,7 @@ static void __init setup_bootmem(void)
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
- reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- initrd_reserve, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0),__pa(initrd_start), initrd_reserve);
}
}
#endif
diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c
index 3b26fbd6bec9..5cd3db5cae41 100644
--- a/trunk/arch/powerpc/kernel/time.c
+++ b/trunk/arch/powerpc/kernel/time.c
@@ -66,7 +66,6 @@
#include
#include
#include
-#include
#ifdef CONFIG_PPC_ISERIES
#include
#include
@@ -190,8 +189,6 @@ u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
-DEFINE_PER_CPU(unsigned long, cputime_last_delta);
-DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static void calc_cputime_factors(void)
{
@@ -260,8 +257,8 @@ void account_system_vtime(struct task_struct *tsk)
}
account_system_time(tsk, 0, delta);
account_system_time_scaled(tsk, deltascaled);
- per_cpu(cputime_last_delta, smp_processor_id()) = delta;
- per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
+ get_paca()->purrdelta = delta;
+ get_paca()->spurrdelta = deltascaled;
local_irq_restore(flags);
}
@@ -279,7 +276,10 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
get_paca()->user_time = 0;
account_user_time(tsk, utime);
- utimescaled = cputime_to_scaled(utime);
+ /* Estimate the scaled utime by scaling the real utime based
+ * on the last spurr to purr ratio */
+ utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
+ get_paca()->spurrdelta = get_paca()->purrdelta = 0;
account_user_time_scaled(tsk, utimescaled);
}
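
The replacement lines above estimate scaled user time from raw user time using the last observed SPURR/PURR delta ratio kept in the paca. A standalone arithmetic illustration with invented sample numbers (user-space harness, not kernel code):

/* Standalone illustration of the ratio-based estimate restored above:
 * scaled utime = raw utime * (SPURR delta / PURR delta).  All numbers
 * below are invented for the example. */
#include <stdio.h>

int main(void)
{
	unsigned long utime      = 1000;	/* raw user-time ticks */
	unsigned long spurrdelta = 80;		/* last scaled-cycle delta */
	unsigned long purrdelta  = 100;		/* last raw-cycle delta */
	unsigned long utimescaled = utime * spurrdelta / purrdelta;

	printf("utimescaled = %lu\n", utimescaled);	/* prints 800 */
	return 0;
}
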
diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c
index ff5debf5eedd..e8122447f019 100644
--- a/trunk/arch/powerpc/mm/mem.c
+++ b/trunk/arch/powerpc/mm/mem.c
@@ -220,13 +220,12 @@ void __init do_init_bootmem(void)
lmb_size_bytes(&lmb.reserved, i) - 1;
if (addr < total_lowmem)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i),
- BOOTMEM_DEFAULT);
+ lmb_size_bytes(&lmb.reserved, i));
else if (lmb.reserved.region[i].base < total_lowmem) {
unsigned long adjusted_size = total_lowmem -
lmb.reserved.region[i].base;
reserve_bootmem(lmb.reserved.region[i].base,
- adjusted_size, BOOTMEM_DEFAULT);
+ adjusted_size);
}
}
#else
@@ -235,8 +234,7 @@ void __init do_init_bootmem(void)
/* reserve the sections we're already using */
for (i = 0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i),
- BOOTMEM_DEFAULT);
+ lmb_size_bytes(&lmb.reserved, i));
#endif
/* XXX need to clip this if using highmem? */
diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c
index bc60322d2436..c12adc3ddffd 100644
--- a/trunk/arch/powerpc/mm/numa.c
+++ b/trunk/arch/powerpc/mm/numa.c
@@ -675,7 +675,7 @@ void __init do_init_bootmem(void)
dbg("reserve_bootmem %lx %lx\n", physbase,
size);
reserve_bootmem_node(NODE_DATA(nid), physbase,
- size, BOOTMEM_DEFAULT);
+ size);
}
}
diff --git a/trunk/arch/powerpc/platforms/cell/Kconfig b/trunk/arch/powerpc/platforms/cell/Kconfig
index 3a963b4a9be0..2f169991896d 100644
--- a/trunk/arch/powerpc/platforms/cell/Kconfig
+++ b/trunk/arch/powerpc/platforms/cell/Kconfig
@@ -54,6 +54,13 @@ config SPU_FS_64K_LS
uses 4K pages. This can improve performances of applications
using multiple SPEs by lowering the TLB pressure on them.
+config SPU_TRACE
+ tristate "SPU event tracing support"
+ depends on SPU_FS && MARKERS
+ help
+ This option allows reading a trace of spu-related events through
+ the sputrace file in procfs.
+
config SPU_BASE
bool
default n
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/Makefile b/trunk/arch/powerpc/platforms/cell/spufs/Makefile
index d3a349fb42e5..99610a6361f2 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/trunk/arch/powerpc/platforms/cell/spufs/Makefile
@@ -4,6 +4,8 @@ spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o
+obj-$(CONFIG_SPU_TRACE) += sputrace.o
+
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS := spu-
SPU_CC := $(SPU_CROSS)gcc
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/file.c b/trunk/arch/powerpc/platforms/cell/spufs/file.c
index 3fcd06418b01..1018acd1746b 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/file.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/file.c
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#include
@@ -358,6 +359,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long area, offset = address - vma->vm_start;
+ spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
+
offset += vma->vm_pgoff << PAGE_SHIFT;
if (offset >= ps_size)
return NOPFN_SIGBUS;
@@ -375,11 +378,14 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
if (ctx->state == SPU_STATE_SAVED) {
up_read(&current->mm->mmap_sem);
+ spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+ spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
}
spu_release(ctx);
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/sched.c b/trunk/arch/powerpc/platforms/cell/spufs/sched.c
index 00d914232af1..5915343e2599 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/sched.c
@@ -39,6 +39,7 @@
#include
#include
#include
+#include
#include
#include
@@ -216,8 +217,8 @@ void do_notify_spus_active(void)
*/
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
- spu->number, spu->node);
+ spu_context_trace(spu_bind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (ctx->flags & SPU_CREATE_NOSCHED)
@@ -399,8 +400,8 @@ static int has_affinity(struct spu_context *ctx)
*/
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
- spu->pid, spu->number, spu->node);
+ spu_context_trace(spu_unbind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (spu->ctx->flags & SPU_CREATE_NOSCHED)
@@ -528,6 +529,8 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
struct spu *spu, *aff_ref_spu;
int node, n;
+ spu_context_nospu_trace(spu_get_idle__enter, ctx);
+
if (ctx->gang) {
mutex_lock(&ctx->gang->aff_mutex);
if (has_affinity(ctx)) {
@@ -546,8 +549,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
ctx->gang->aff_ref_spu = NULL;
mutex_unlock(&ctx->gang->aff_mutex);
-
- return NULL;
+ goto not_found;
}
mutex_unlock(&ctx->gang->aff_mutex);
}
@@ -565,12 +567,14 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
mutex_unlock(&cbe_spu_info[node].list_mutex);
}
+ not_found:
+ spu_context_nospu_trace(spu_get_idle__not_found, ctx);
return NULL;
found:
spu->alloc_state = SPU_USED;
mutex_unlock(&cbe_spu_info[node].list_mutex);
- pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_context_trace(spu_get_idle__found, ctx, spu);
spu_init_channels(spu);
return spu;
}
@@ -587,6 +591,8 @@ static struct spu *find_victim(struct spu_context *ctx)
struct spu *spu;
int node, n;
+ spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+
/*
* Look for a possible preemption candidate on the local node first.
* If there is no candidate look at the other nodes. This isn't
@@ -640,6 +646,8 @@ static struct spu *find_victim(struct spu_context *ctx)
goto restart;
}
+ spu_context_trace(__spu_deactivate__unload, ctx, spu);
+
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
spu_unbind_context(spu, victim);
@@ -822,6 +830,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
*/
void spu_deactivate(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_deactivate__enter, ctx);
__spu_deactivate(ctx, 1, MAX_PRIO);
}
@@ -835,6 +844,7 @@ void spu_deactivate(struct spu_context *ctx)
*/
void spu_yield(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_yield__enter, ctx);
if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
mutex_lock(&ctx->state_mutex);
__spu_deactivate(ctx, 0, MAX_PRIO);
@@ -864,11 +874,15 @@ static noinline void spusched_tick(struct spu_context *ctx)
goto out;
spu = ctx->spu;
+
+ spu_context_trace(spusched_tick__preempt, ctx, spu);
+
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
spu_unschedule(spu, ctx);
spu_add_to_rq(ctx);
} else {
+ spu_context_nospu_trace(spusched_tick__newslice, ctx);
ctx->time_slice++;
}
out:
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/spufs.h b/trunk/arch/powerpc/platforms/cell/spufs/spufs.h
index 0e114038ea6f..795a1b52538b 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/trunk/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -325,4 +325,9 @@ extern void spu_free_lscsa(struct spu_state *csa);
extern void spuctx_switch_state(struct spu_context *ctx,
enum spu_utilization_state new_state);
+#define spu_context_trace(name, ctx, spu) \
+ trace_mark(name, "%p %p", ctx, spu);
+#define spu_context_nospu_trace(name, ctx) \
+ trace_mark(name, "%p", ctx);
+
#endif
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/sputrace.c b/trunk/arch/powerpc/platforms/cell/spufs/sputrace.c
new file mode 100644
index 000000000000..2b1953f6f12e
--- /dev/null
+++ b/trunk/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH
+ * Released under GPL v2.
+ *
+ * Partially based on net/ipv4/tcp_probe.c.
+ *
+ * Simple tracing facility for spu contexts.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "spufs.h"
+
+struct spu_probe {
+ const char *name;
+ const char *format;
+ marker_probe_func *probe_func;
+};
+
+struct sputrace {
+ ktime_t tstamp;
+ int owner_tid; /* owner */
+ int curr_tid;
+ const char *name;
+ int number;
+};
+
+static int bufsize __read_mostly = 16384;
+MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)");
+module_param(bufsize, int, 0);
+
+
+static DEFINE_SPINLOCK(sputrace_lock);
+static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
+static ktime_t sputrace_start;
+static unsigned long sputrace_head, sputrace_tail;
+static struct sputrace *sputrace_log;
+
+static int sputrace_used(void)
+{
+ return (sputrace_head - sputrace_tail) % bufsize;
+}
+
+static inline int sputrace_avail(void)
+{
+ return bufsize - sputrace_used();
+}
+
+static int sputrace_sprint(char *tbuf, int n)
+{
+ const struct sputrace *t = sputrace_log + sputrace_tail % bufsize;
+ struct timespec tv =
+ ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
+
+ return snprintf(tbuf, n,
+ "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+ (unsigned long) tv.tv_sec,
+ (unsigned long) tv.tv_nsec,
+ t->owner_tid,
+ t->name,
+ t->curr_tid,
+ t->number);
+}
+
+static ssize_t sputrace_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int error = 0, cnt = 0;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+ while (cnt < len) {
+ char tbuf[128];
+ int width;
+
+ error = wait_event_interruptible(sputrace_wait,
+ sputrace_used() > 0);
+ if (error)
+ break;
+
+ spin_lock(&sputrace_lock);
+ if (sputrace_head == sputrace_tail) {
+ spin_unlock(&sputrace_lock);
+ continue;
+ }
+
+ width = sputrace_sprint(tbuf, sizeof(tbuf));
+ if (width < len)
+ sputrace_tail = (sputrace_tail + 1) % bufsize;
+ spin_unlock(&sputrace_lock);
+
+ if (width >= len)
+ break;
+
+ error = copy_to_user(buf + cnt, tbuf, width);
+ if (error)
+ break;
+ cnt += width;
+ }
+
+ return cnt == 0 ? error : cnt;
+}
+
+static int sputrace_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&sputrace_lock);
+ sputrace_head = sputrace_tail = 0;
+ sputrace_start = ktime_get();
+ spin_unlock(&sputrace_lock);
+
+ return 0;
+}
+
+static const struct file_operations sputrace_fops = {
+ .owner = THIS_MODULE,
+ .open = sputrace_open,
+ .read = sputrace_read,
+};
+
+static void sputrace_log_item(const char *name, struct spu_context *ctx,
+ struct spu *spu)
+{
+ spin_lock(&sputrace_lock);
+ if (sputrace_avail() > 1) {
+ struct sputrace *t = sputrace_log + sputrace_head;
+
+ t->tstamp = ktime_get();
+ t->owner_tid = ctx->tid;
+ t->name = name;
+ t->curr_tid = current->pid;
+ t->number = spu ? spu->number : -1;
+
+ sputrace_head = (sputrace_head + 1) % bufsize;
+ } else {
+ printk(KERN_WARNING
+ "sputrace: lost samples due to full buffer.\n");
+ }
+ spin_unlock(&sputrace_lock);
+
+ wake_up(&sputrace_wait);
+}
+
+static void spu_context_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+ struct spu *spu;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+ spu = va_arg(ap, struct spu *);
+
+ sputrace_log_item(p->name, ctx, spu);
+ va_end(ap);
+}
+
+static void spu_context_nospu_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+
+ sputrace_log_item(p->name, ctx, NULL);
+ va_end(ap);
+}
+
+struct spu_probe spu_probes[] = {
+ { "spu_bind_context__enter", "%p %p", spu_context_event },
+ { "spu_unbind_context__enter", "%p %p", spu_context_event },
+ { "spu_get_idle__enter", "%p", spu_context_nospu_event },
+ { "spu_get_idle__found", "%p %p", spu_context_event },
+ { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
+ { "spu_find_victim__enter", "%p", spu_context_nospu_event },
+ { "spusched_tick__preempt", "%p %p", spu_context_event },
+ { "spusched_tick__newslice", "%p", spu_context_nospu_event },
+ { "spu_yield__enter", "%p", spu_context_nospu_event },
+ { "spu_deactivate__enter", "%p", spu_context_nospu_event },
+ { "__spu_deactivate__unload", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
+ { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
+ { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+};
+
+static int __init sputrace_init(void)
+{
+ struct proc_dir_entry *entry;
+ int i, error = -ENOMEM;
+
+ sputrace_log = kcalloc(sizeof(struct sputrace),
+ bufsize, GFP_KERNEL);
+ if (!sputrace_log)
+ goto out;
+
+ entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+ if (!entry)
+ goto out_free_log;
+ entry->proc_fops = &sputrace_fops;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
+ struct spu_probe *p = &spu_probes[i];
+
+ error = marker_probe_register(p->name, p->format,
+ p->probe_func, p);
+ if (error)
+ printk(KERN_INFO "Unable to register probe %s\n",
+ p->name);
+
+ error = marker_arm(p->name);
+ if (error)
+ printk(KERN_INFO "Unable to arm probe %s\n", p->name);
+ }
+
+ return 0;
+
+out_free_log:
+ kfree(sputrace_log);
+out:
+ return -ENOMEM;
+}
+
+static void __exit sputrace_exit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
+ marker_probe_unregister(spu_probes[i].name);
+
+ remove_proc_entry("sputrace", NULL);
+ kfree(sputrace_log);
+}
+
+module_init(sputrace_init);
+module_exit(sputrace_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/trunk/arch/powerpc/platforms/powermac/cpufreq_32.c b/trunk/arch/powerpc/platforms/powermac/cpufreq_32.c
index 792d3ce8112e..c04abcc28a7a 100644
--- a/trunk/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/trunk/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -113,6 +113,8 @@ static inline void debug_calc_bogomips(void)
* result. We backup/restore the value to avoid affecting the
* core cpufreq framework's own calculation.
*/
+ extern void calibrate_delay(void);
+
unsigned long save_lpj = loops_per_jiffy;
calibrate_delay();
loops_per_jiffy = save_lpj;
diff --git a/trunk/arch/ppc/8260_io/enet.c b/trunk/arch/ppc/8260_io/enet.c
index ec1defea9c1e..25ef55bacd99 100644
--- a/trunk/arch/ppc/8260_io/enet.c
+++ b/trunk/arch/ppc/8260_io/enet.c
@@ -418,7 +418,7 @@ scc_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
- cep = dev->priv;
+ cep = (struct scc_enet_private *)dev->priv;
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
diff --git a/trunk/arch/ppc/8260_io/fcc_enet.c b/trunk/arch/ppc/8260_io/fcc_enet.c
index bcc3aa9d04f3..a3a27dafff1f 100644
--- a/trunk/arch/ppc/8260_io/fcc_enet.c
+++ b/trunk/arch/ppc/8260_io/fcc_enet.c
@@ -682,7 +682,7 @@ fcc_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
- cep = dev->priv;
+ cep = (struct fcc_enet_private *)dev->priv;
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
diff --git a/trunk/arch/ppc/kernel/vmlinux.lds.S b/trunk/arch/ppc/kernel/vmlinux.lds.S
index 8a24bc47eb6c..52b64fcbdfc5 100644
--- a/trunk/arch/ppc/kernel/vmlinux.lds.S
+++ b/trunk/arch/ppc/kernel/vmlinux.lds.S
@@ -143,6 +143,11 @@ SECTIONS
. = ALIGN(4096);
__init_end = .;
+
+ . = ALIGN(4096);
+ _sextratext = .;
+ _eextratext = .;
+
__bss_start = .;
.bss :
{
diff --git a/trunk/arch/ppc/platforms/prep_setup.c b/trunk/arch/ppc/platforms/prep_setup.c
index 38449855d5ff..3c56654bfc6f 100644
--- a/trunk/arch/ppc/platforms/prep_setup.c
+++ b/trunk/arch/ppc/platforms/prep_setup.c
@@ -91,11 +91,20 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
+#ifdef CONFIG_SOUND_CS4232
+long ppc_cs4232_dma, ppc_cs4232_dma2;
+#endif
+
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern int probingmem;
extern unsigned long loops_per_jiffy;
+#ifdef CONFIG_SOUND_CS4232
+EXPORT_SYMBOL(ppc_cs4232_dma);
+EXPORT_SYMBOL(ppc_cs4232_dma2);
+#endif
+
/* useful ISA ports */
#define PREP_SYSCTL 0x81c
/* present in the IBM reference design; possibly identical in Mot boxes: */
@@ -560,6 +569,74 @@ prep_show_percpuinfo(struct seq_file *m, int i)
return 0;
}
+#ifdef CONFIG_SOUND_CS4232
+static long __init masktoint(unsigned int i)
+{
+ int t = -1;
+ while (i >> ++t)
+ ;
+ return (t-1);
+}
+
+/*
+ * ppc_cs4232_dma and ppc_cs4232_dma2 are used in include/asm/dma.h
+ * to distinguish sound dma-channels from others. This is because
+ * blocksize on 16 bit dma-channels 5,6,7 is 128k, but
+ * the cs4232.c uses 64k like on 8 bit dma-channels 0,1,2,3
+ */
+
+static void __init prep_init_sound(void)
+{
+ PPC_DEVICE *audiodevice = NULL;
+
+ /*
+ * Get the needed resource information from residual data.
+ *
+ */
+ if (have_residual_data)
+ audiodevice = residual_find_device(~0, NULL,
+ MultimediaController, AudioController, -1, 0);
+
+ if (audiodevice != NULL) {
+ PnP_TAG_PACKET *pkt;
+
+ pkt = PnP_find_packet((unsigned char *)&res->DevicePnPHeap[audiodevice->AllocatedOffset],
+ S5_Packet, 0);
+ if (pkt != NULL)
+ ppc_cs4232_dma = masktoint(pkt->S5_Pack.DMAMask);
+ pkt = PnP_find_packet((unsigned char*)&res->DevicePnPHeap[audiodevice->AllocatedOffset],
+ S5_Packet, 1);
+ if (pkt != NULL)
+ ppc_cs4232_dma2 = masktoint(pkt->S5_Pack.DMAMask);
+ }
+
+ /*
+ * These are the PReP specs' defaults for the cs4231. We use these
+ * as a fallback in case we don't have residual data.
+ * At least the IBM Thinkpad 850 with IDE DMA Channels at 6 and 7
+ * will use the other values.
+ */
+ if (audiodevice == NULL) {
+ switch (_prep_type) {
+ case _PREP_IBM:
+ ppc_cs4232_dma = 1;
+ ppc_cs4232_dma2 = -1;
+ break;
+ default:
+ ppc_cs4232_dma = 6;
+ ppc_cs4232_dma2 = 7;
+ }
+ }
+
+ /*
+ * Find a way to push this information to the cs4232 driver
+ * Give it out with printk, when not in cmd_line?
+ * Append it to cmd_line and boot_command_line?
+ * Format is cs4232=io,irq,dma,dma2
+ */
+}
+#endif /* CONFIG_SOUND_CS4232 */
+
/*
* Fill out screen_info according to the residual data. This allows us to use
* at least vesafb.
@@ -821,6 +898,10 @@ prep_setup_arch(void)
}
}
+#ifdef CONFIG_SOUND_CS4232
+ prep_init_sound();
+#endif /* CONFIG_SOUND_CS4232 */
+
prep_init_vesa();
switch (_prep_type) {
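
prep_init_sound() above derives the CS4232 DMA channel numbers from single-bit PnP DMA masks via masktoint(). A quick standalone check of that helper (user-space harness, not kernel code; the expected values follow directly from the bit positions):

/* Standalone check of the masktoint() helper added above: it returns the
 * index of the set bit in a single-bit PnP DMA mask, so 0x40 maps to DMA
 * channel 6.  User-space harness only. */
#include <stdio.h>

static long masktoint(unsigned int i)
{
	int t = -1;

	while (i >> ++t)
		;
	return t - 1;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       masktoint(0x02), masktoint(0x40), masktoint(0x80));
	/* expected output: 1 6 7 */
	return 0;
}
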
diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c
index f9f8779022a0..29ae165d1749 100644
--- a/trunk/arch/s390/kernel/setup.c
+++ b/trunk/arch/s390/kernel/setup.c
@@ -649,24 +649,21 @@ setup_memory(void)
/*
* Reserve memory used for lowcore/command line/kernel image.
*/
- reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
+ reserve_bootmem(0, (unsigned long)_ehead);
reserve_bootmem((unsigned long)_stext,
- PFN_PHYS(start_pfn) - (unsigned long)_stext,
- BOOTMEM_DEFAULT);
+ PFN_PHYS(start_pfn) - (unsigned long)_stext);
/*
* Reserve the bootmem bitmap itself as well. We do this in two
* steps (first step was init_bootmem()) because this catches
* the (very unlikely) case of us accidentally initializing the
* bootmem allocator with an invalid RAM area.
*/
- reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
if (INITRD_START + INITRD_SIZE <= memory_end) {
- reserve_bootmem(INITRD_START, INITRD_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(INITRD_START, INITRD_SIZE);
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
diff --git a/trunk/arch/sh/boards/landisk/setup.c b/trunk/arch/sh/boards/landisk/setup.c
index 2b708ec72558..eda71763ecc5 100644
--- a/trunk/arch/sh/boards/landisk/setup.c
+++ b/trunk/arch/sh/boards/landisk/setup.c
@@ -14,7 +14,7 @@
*/
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/boards/lboxre2/setup.c b/trunk/arch/sh/boards/lboxre2/setup.c
index c74440d38ee9..9c830fdc411b 100644
--- a/trunk/arch/sh/boards/lboxre2/setup.c
+++ b/trunk/arch/sh/boards/lboxre2/setup.c
@@ -13,7 +13,7 @@
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/boards/renesas/r7780rp/setup.c b/trunk/arch/sh/boards/renesas/r7780rp/setup.c
index f7a8d5c9d510..a43b47726f54 100644
--- a/trunk/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/trunk/arch/sh/boards/renesas/r7780rp/setup.c
@@ -15,7 +15,7 @@
*/
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c b/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c
index a0ef81b7de37..3452b072adde 100644
--- a/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c
+++ b/trunk/arch/sh/boards/renesas/rts7751r2d/setup.c
@@ -10,7 +10,7 @@
*/
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/boards/renesas/sdk7780/setup.c b/trunk/arch/sh/boards/renesas/sdk7780/setup.c
index acc5932587f1..5df32f201870 100644
--- a/trunk/arch/sh/boards/renesas/sdk7780/setup.c
+++ b/trunk/arch/sh/boards/renesas/sdk7780/setup.c
@@ -11,7 +11,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/boards/se/7722/setup.c b/trunk/arch/sh/boards/se/7722/setup.c
index b1a3d9d0172f..eb97dca5b736 100644
--- a/trunk/arch/sh/boards/se/7722/setup.c
+++ b/trunk/arch/sh/boards/se/7722/setup.c
@@ -12,7 +12,7 @@
*/
#include
#include
-#include
+#include
#include
#include
#include
diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c
index 18a5baf2cbad..855cdf9d85b1 100644
--- a/trunk/arch/sh/kernel/setup.c
+++ b/trunk/arch/sh/kernel/setup.c
@@ -140,26 +140,18 @@ static void __init reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, free_mem,
&crash_size, &crash_base);
if (ret == 0 && crash_size) {
- if (crash_base <= 0) {
+ if (crash_base > 0) {
+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(free_mem >> 20));
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ reserve_bootmem(crash_base, crash_size);
+ } else
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
- return;
- }
-
- if (reserve_bootmem(crash_base, crash_size,
- BOOTMEM_EXCLUSIVE) < 0) {
- printk(KERN_INFO "crashkernel reservation failed - "
- "memory is in use\n");
- return;
- }
-
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(free_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
}
}
#else
@@ -192,14 +184,13 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
* an invalid RAM area.
*/
reserve_bootmem(__MEMORY_START+PAGE_SIZE,
- (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START,
- BOOTMEM_DEFAULT);
+ (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(__MEMORY_START, PAGE_SIZE);
sparse_memory_present_with_active_regions(0);
@@ -209,7 +200,7 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START + __MEMORY_START,
- INITRD_SIZE, BOOTMEM_DEFAULT);
+ INITRD_SIZE);
initrd_start = INITRD_START + PAGE_OFFSET +
__MEMORY_START;
initrd_end = initrd_start + INITRD_SIZE;
diff --git a/trunk/arch/sh/mm/numa.c b/trunk/arch/sh/mm/numa.c
index 2de7302724fc..8aff065dd307 100644
--- a/trunk/arch/sh/mm/numa.c
+++ b/trunk/arch/sh/mm/numa.c
@@ -80,9 +80,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Reserve the pgdat and bootmap space with the bootmem allocator */
reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
- sizeof(struct pglist_data), BOOTMEM_DEFAULT);
+ sizeof(struct pglist_data));
reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
- bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ bootmap_pages << PAGE_SHIFT);
/* It's up */
node_set_online(nid);
diff --git a/trunk/arch/sparc/kernel/sun4d_smp.c b/trunk/arch/sparc/kernel/sun4d_smp.c
index 0def48158c7d..89a6de95070c 100644
--- a/trunk/arch/sparc/kernel/sun4d_smp.c
+++ b/trunk/arch/sparc/kernel/sun4d_smp.c
@@ -19,12 +19,12 @@
#include
#include
#include
-#include
#include
#include
#include
+#include
#include
#include
#include
@@ -41,6 +41,8 @@
extern ctxd_t *srmmu_ctx_table_phys;
+extern void calibrate_delay(void);
+
static volatile int smp_processors_ready = 0;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
diff --git a/trunk/arch/sparc/kernel/sun4m_smp.c b/trunk/arch/sparc/kernel/sun4m_smp.c
index 0b9407267162..730eb5796f8e 100644
--- a/trunk/arch/sparc/kernel/sun4m_smp.c
+++ b/trunk/arch/sparc/kernel/sun4m_smp.c
@@ -16,8 +16,6 @@
#include
#include
#include
-#include
-
#include
#include
#include
@@ -25,6 +23,7 @@
#include
#include
+#include
#include
#include
#include
@@ -40,6 +39,8 @@
extern ctxd_t *srmmu_ctx_table_phys;
+extern void calibrate_delay(void);
+
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
diff --git a/trunk/arch/sparc/kernel/systbls.S b/trunk/arch/sparc/kernel/systbls.S
index 9064485dc40b..ee010f4532a0 100644
--- a/trunk/arch/sparc/kernel/systbls.S
+++ b/trunk/arch/sparc/kernel/systbls.S
@@ -79,8 +79,7 @@ sys_call_table:
/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
-/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime
+/*310*/ .long sys_utimensat, sys_signalfd, sys_ni_syscall, sys_eventfd, sys_fallocate
#ifdef CONFIG_SUNOS_EMUL
/* Now the SunOS syscall table. */
@@ -198,7 +197,6 @@ sunos_sys_table:
.long sunos_nosys, sunos_nosys, sunos_nosys
.long sunos_nosys
/*310*/ .long sunos_nosys, sunos_nosys, sunos_nosys
- .long sunos_nosys, sunos_nosys, sunos_nosys
- .long sunos_nosys
+ .long sunos_nosys, sunos_nosys
#endif
diff --git a/trunk/arch/sparc/mm/init.c b/trunk/arch/sparc/mm/init.c
index b89837accc88..a1bef07755a9 100644
--- a/trunk/arch/sparc/mm/init.c
+++ b/trunk/arch/sparc/mm/init.c
@@ -259,7 +259,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
if (initrd_start) {
/* Reserve the initrd image area. */
size = initrd_end - initrd_start;
- reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
+ reserve_bootmem(initrd_start, size);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
@@ -268,7 +268,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
#endif
/* Reserve the kernel text/data/bss. */
size = (start_pfn << PAGE_SHIFT) - phys_base;
- reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
+ reserve_bootmem(phys_base, size);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
/* Reserve the bootmem map. We do not account for it
@@ -276,7 +276,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
* in free_all_bootmem.
*/
size = bootmap_size;
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
return max_pfn;
diff --git a/trunk/arch/sparc64/defconfig b/trunk/arch/sparc64/defconfig
index 833d74b2b192..f62d9f6c5e2a 100644
--- a/trunk/arch/sparc64/defconfig
+++ b/trunk/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24
-# Tue Feb 5 17:28:19 2008
+# Linux kernel version: 2.6.24-rc4
+# Tue Dec 4 00:37:59 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -17,7 +17,6 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_AUDIT_ARCH=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
@@ -31,15 +30,13 @@ CONFIG_HZ_100=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
-# CONFIG_SCHED_HRTICK is not set
-CONFIG_HOTPLUG_CPU=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_LOCK_KERNEL=y
+CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
@@ -79,7 +76,6 @@ CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_VM_EVENT_COUNTERS=y
@@ -87,14 +83,6 @@ CONFIG_SLUB_DEBUG=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
-CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
-CONFIG_OPROFILE=m
-CONFIG_HAVE_OPROFILE=y
-CONFIG_KPROBES=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -104,7 +92,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BLK_DEV_BSG=y
@@ -122,8 +109,6 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
-CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
CONFIG_SYSVIPC_COMPAT=y
CONFIG_GENERIC_HARDIRQS=y
@@ -134,8 +119,7 @@ CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=64
+# CONFIG_SMP is not set
# CONFIG_CPU_FREQ is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
@@ -185,12 +169,9 @@ CONFIG_BINFMT_ELF32=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
CONFIG_SOLARIS_EMUL=y
-CONFIG_SCHED_SMT=y
-CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
-# CONFIG_RCU_TRACE is not set
# CONFIG_CMDLINE_BOOL is not set
#
@@ -208,7 +189,6 @@ CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
CONFIG_XFRM_MIGRATE=y
-# CONFIG_XFRM_STATISTICS is not set
CONFIG_NET_KEY=m
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
@@ -269,9 +249,9 @@ CONFIG_IP_DCCP_ACKVEC=y
CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_TFRC_LIB=m
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
#
# DCCP Kernel Hacking
@@ -299,7 +279,6 @@ CONFIG_VLAN_8021Q=m
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
@@ -364,7 +343,6 @@ CONFIG_BLK_DEV_IDE=y
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
# CONFIG_BLK_DEV_IDETAPE is not set
# CONFIG_BLK_DEV_IDEFLOPPY is not set
# CONFIG_BLK_DEV_IDESCSI is not set
@@ -381,6 +359,7 @@ CONFIG_IDE_GENERIC=y
# PCI IDE chipsets support
#
CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
CONFIG_IDEPCI_PCIBUS_ORDER=y
# CONFIG_BLK_DEV_GENERIC is not set
# CONFIG_BLK_DEV_OPTI621 is not set
@@ -410,6 +389,7 @@ CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
CONFIG_IDE_ARCH_OBSOLETE_INIT=y
# CONFIG_BLK_DEV_HD is not set
@@ -521,6 +501,7 @@ CONFIG_NETDEVICES=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
+# CONFIG_IP1000 is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
@@ -552,7 +533,6 @@ CONFIG_NET_PCI=y
# CONFIG_NE2K_PCI is not set
# CONFIG_8139CP is not set
# CONFIG_8139TOO is not set
-# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
@@ -565,9 +545,6 @@ CONFIG_E1000=m
CONFIG_E1000_NAPI=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_E1000E is not set
-# CONFIG_E1000E_ENABLED is not set
-# CONFIG_IP1000 is not set
-# CONFIG_IGB is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
@@ -593,7 +570,6 @@ CONFIG_NETDEV_10000=y
CONFIG_NIU=m
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
-# CONFIG_BNX2X is not set
# CONFIG_TR is not set
#
@@ -626,6 +602,7 @@ CONFIG_PPPOE=m
# CONFIG_SLIP is not set
CONFIG_SLHC=m
# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -702,7 +679,6 @@ CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
# CONFIG_SERIAL_NONSTANDARD is not set
-# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -771,13 +747,13 @@ CONFIG_I2C_ALGOBIT=y
#
# Miscellaneous I2C Chip support
#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
# CONFIG_DS1682 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
@@ -1014,7 +990,6 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
# CONFIG_SND_CMIPCI is not set
-# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_DARLA20 is not set
@@ -1039,7 +1014,6 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_HDA_INTEL is not set
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_HIFIER is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
@@ -1057,7 +1031,6 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
-# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
# CONFIG_SND_AC97_POWER_SAVE is not set
@@ -1084,10 +1057,6 @@ CONFIG_SND_SUN_CS4231=m
# SoC Audio support for SuperH
#
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
#
# Open Sound System
#
@@ -1111,7 +1080,6 @@ CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
-# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
@@ -1125,6 +1093,7 @@ CONFIG_USB_DEVICEFS=y
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=m
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
# CONFIG_USB_ISP116X_HCD is not set
@@ -1174,6 +1143,10 @@ CONFIG_USB_STORAGE=m
#
# USB port drivers
#
+
+#
+# USB Serial Converter support
+#
# CONFIG_USB_SERIAL is not set
#
@@ -1199,6 +1172,14 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
# CONFIG_NEW_LEDS is not set
@@ -1351,6 +1332,11 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
+CONFIG_INSTRUMENTATION=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+# CONFIG_MARKERS is not set
#
# Kernel hacking
@@ -1388,8 +1374,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_FORCED_INLINING=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SAMPLES is not set
@@ -1412,9 +1396,8 @@ CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_HMAC=y
@@ -1433,9 +1416,6 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_XTS=m
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_FCRYPT=m
@@ -1451,16 +1431,13 @@ CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_SEED=m
-# CONFIG_CRYPTO_SALSA20 is not set
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_AUTHENC=m
-# CONFIG_CRYPTO_LZO is not set
CONFIG_CRYPTO_HW=y
-# CONFIG_CRYPTO_DEV_HIFN_795X is not set
#
# Library routines
diff --git a/trunk/arch/sparc64/kernel/Makefile b/trunk/arch/sparc64/kernel/Makefile
index 4b78b24ef413..ef50d217432f 100644
--- a/trunk/arch/sparc64/kernel/Makefile
+++ b/trunk/arch/sparc64/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
traps.o auxio.o una_asm.o sysfs.o iommu.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
- power.o sbus.o sparc64_ksyms.o chmc.o \
+ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/trunk/arch/sparc64/kernel/iommu.c b/trunk/arch/sparc64/kernel/iommu.c
index 5623a4d59dff..4b9115a4d92e 100644
--- a/trunk/arch/sparc64/kernel/iommu.c
+++ b/trunk/arch/sparc64/kernel/iommu.c
@@ -472,15 +472,94 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
+
+static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
+ int nused, int nelems,
+ unsigned long iopte_protection)
+{
+ struct scatterlist *dma_sg = sg;
+ int i;
+
+ for (i = 0; i < nused; i++) {
+ unsigned long pteval = ~0UL;
+ u32 dma_npages;
+
+ dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+ dma_sg->dma_length +
+ ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+ do {
+ unsigned long offset;
+ signed int len;
+
+ /* If we are here, we know we have at least one
+ * more page to map. So walk forward until we
+ * hit a page crossing, and begin creating new
+ * mappings from that spot.
+ */
+ for (;;) {
+ unsigned long tmp;
+
+ tmp = SG_ENT_PHYS_ADDRESS(sg);
+ len = sg->length;
+ if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = tmp & IO_PAGE_MASK;
+ offset = tmp & (IO_PAGE_SIZE - 1UL);
+ break;
+ }
+ if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+ offset = 0UL;
+ len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+ break;
+ }
+ sg = sg_next(sg);
+ nelems--;
+ }
+
+ pteval = iopte_protection | (pteval & IOPTE_PAGE);
+ while (len > 0) {
+ *iopte++ = __iopte(pteval);
+ pteval += IO_PAGE_SIZE;
+ len -= (IO_PAGE_SIZE - offset);
+ offset = 0;
+ dma_npages--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE) + len;
+ sg = sg_next(sg);
+ nelems--;
+
+ /* Skip over any tail mappings we've fully mapped,
+ * adjusting pteval along the way. Stop when we
+ * detect a page crossing event.
+ */
+ while (nelems &&
+ (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+ (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+ ((pteval ^
+ (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+ pteval += sg->length;
+ sg = sg_next(sg);
+ nelems--;
+ }
+ if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+ pteval = ~0UL;
+ } while (dma_npages != 0);
+ dma_sg = sg_next(dma_sg);
+ }
+}
+
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- unsigned long flags, ctx, i, npages, iopte_protection;
- struct scatterlist *sg;
- struct strbuf *strbuf;
struct iommu *iommu;
+ struct strbuf *strbuf;
+ unsigned long flags, ctx, npages, iopte_protection;
iopte_t *base;
u32 dma_base;
+ struct scatterlist *sgtmp;
+ int used;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -499,7 +578,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- npages = calc_npages(sglist, nelems);
+ /* Step 1: Prepare scatter list. */
+
+ npages = prepare_sg(dev, sglist, nelems);
+
+ /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
@@ -516,6 +599,18 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT);
+ /* Step 3: Normalize DMA addresses. */
+ used = nelems;
+
+ sgtmp = sglist;
+ while (used && sgtmp->dma_length) {
+ sgtmp->dma_address += dma_base;
+ sgtmp = sg_next(sgtmp);
+ used--;
+ }
+ used = nelems - used;
+
+ /* Step 4: Create the mappings. */
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -523,27 +618,13 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
- for_each_sg(sglist, sg, nelems, i) {
- unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
- unsigned long slen = sg->length;
- unsigned long this_npages;
-
- this_npages = iommu_num_pages(paddr, slen);
-
- sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
- sg->dma_length = slen;
-
- paddr &= IO_PAGE_MASK;
- while (this_npages--) {
- iopte_val(*base) = iopte_protection | paddr;
+ fill_sg(base, sglist, used, nelems, iopte_protection);
- base++;
- paddr += IO_PAGE_SIZE;
- dma_base += IO_PAGE_SIZE;
- }
- }
+#ifdef VERIFY_SG
+ verify_sglist(sglist, nelems, base, npages);
+#endif
- return nelems;
+ return used;
bad:
iommu_free_ctx(iommu, ctx);
@@ -556,10 +637,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- unsigned long flags, ctx, i, npages;
- struct strbuf *strbuf;
struct iommu *iommu;
+ struct strbuf *strbuf;
iopte_t *base;
+ unsigned long flags, ctx, i, npages;
+ struct scatterlist *sg, *sgprv;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) {
@@ -572,7 +654,15 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- npages = calc_npages(sglist, nelems);
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
+ break;
+ sgprv = sg;
+ }
+
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
+ bus_addr) >> IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
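In the unmap path above, the page count is no longer summed per scatterlist entry; it is derived from the first segment's page-aligned bus address and the last segment that still has a non-zero dma_length. A hedged userspace sketch of just that calculation (struct seg stands in for struct scatterlist, and IO_PAGE_SHIFT of 13 matches the sparc64 value):

/* Userspace sketch only, not kernel code. */
#include <stdio.h>

#define IO_PAGE_SHIFT    13
#define IO_PAGE_SIZE     (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK     (~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

struct seg { unsigned long dma_address; unsigned long dma_length; };

int main(void)
{
        /* Two coalesced DMA segments followed by the zero-length
         * terminator that prepare_sg() writes. */
        struct seg sg[] = {
                { 0x80002000UL, 0x3000UL },
                { 0x80005000UL, 0x0800UL },
                { 0UL,          0UL      },
        };
        unsigned long bus_addr = sg[0].dma_address & IO_PAGE_MASK;
        struct seg *last = NULL;
        unsigned long npages;
        int i;

        /* Find the last live segment, as the unmap path does. */
        for (i = 0; i < 3; i++) {
                if (sg[i].dma_length == 0)
                        break;
                last = &sg[i];
        }

        npages = (IO_PAGE_ALIGN(last->dma_address + last->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;
        printf("unmap spans %lu IOMMU pages starting at 0x%lx\n",
               npages, bus_addr);
        return 0;
}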
diff --git a/trunk/arch/sparc64/kernel/iommu_common.c b/trunk/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 000000000000..72a4acfe8c7b
--- /dev/null
+++ b/trunk/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,248 @@
+/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
+ * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include
+#include "iommu_common.h"
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+
+#ifdef VERIFY_SG
+static int verify_lengths(struct scatterlist *sglist, int nents, int npages)
+{
+ int sg_len, dma_len;
+ int i, pgcount;
+ struct scatterlist *sg;
+
+ sg_len = 0;
+ for_each_sg(sglist, sg, nents, i)
+ sg_len += sg->length;
+
+ dma_len = 0;
+ for_each_sg(sglist, sg, nents, i) {
+ if (!sg->dma_length)
+ break;
+ dma_len += sg->dma_length;
+ }
+
+ if (sg_len != dma_len) {
+ printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
+ sg_len, dma_len);
+ return -1;
+ }
+
+ pgcount = 0;
+ for_each_sg(sglist, sg, nents, i) {
+ unsigned long start, end;
+
+ if (!sg->dma_length)
+ break;
+
+ start = sg->dma_address;
+ start = start & IO_PAGE_MASK;
+
+ end = sg->dma_address + sg->dma_length;
+ end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
+
+ pgcount += ((end - start) >> IO_PAGE_SHIFT);
+ }
+
+ if (pgcount != npages) {
+ printk("verify_lengths: Error, page count wrong, "
+ "npages[%d] pgcount[%d]\n",
+ npages, pgcount);
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
+{
+ struct scatterlist *sg = *__sg;
+ iopte_t *iopte = *__iopte;
+ u32 dlen = dma_sg->dma_length;
+ u32 daddr;
+ unsigned int sglen;
+ unsigned long sgaddr;
+
+ daddr = dma_sg->dma_address;
+ sglen = sg->length;
+ sgaddr = (unsigned long) sg_virt(sg);
+ while (dlen > 0) {
+ unsigned long paddr;
+
+ /* SG and DMA_SG must begin at the same sub-page boundary. */
+ if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
+ printk("verify_one_map: Wrong start offset "
+ "sg[%08lx] dma[%08x]\n",
+ sgaddr, daddr);
+ nents = -1;
+ goto out;
+ }
+
+ /* Verify the IOPTE points to the right page. */
+ paddr = iopte_val(*iopte) & IOPTE_PAGE;
+ if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
+ printk("verify_one_map: IOPTE[%08lx] maps the "
+ "wrong page, should be [%08lx]\n",
+ iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
+ nents = -1;
+ goto out;
+ }
+
+ /* If this SG crosses a page, adjust to that next page
+ * boundary and loop.
+ */
+ if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
+ unsigned long next_page, diff;
+
+ next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
+ diff = next_page - sgaddr;
+ sgaddr += diff;
+ daddr += diff;
+ sglen -= diff;
+ dlen -= diff;
+ if (dlen > 0)
+ iopte++;
+ continue;
+ }
+
+ /* SG wholly consumed within this page. */
+ daddr += sglen;
+ dlen -= sglen;
+
+ if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
+ iopte++;
+
+ sg = sg_next(sg);
+ if (--nents <= 0)
+ break;
+ sgaddr = (unsigned long) sg_virt(sg);
+ sglen = sg->length;
+ }
+ if (dlen < 0) {
+ /* Transfer overrun, big problems. */
+ printk("verify_one_map: Transfer overrun by %d bytes.\n",
+ -dlen);
+ nents = -1;
+ } else {
+ /* Advance to next dma_sg implies that the next iopte will
+ * begin it.
+ */
+ iopte++;
+ }
+
+out:
+ *__sg = sg;
+ *__iopte = iopte;
+ return nents;
+}
+
+static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
+{
+ struct scatterlist *dma_sg = sg;
+ struct scatterlist *orig_dma_sg = dma_sg;
+ int orig_nents = nents;
+
+ for (;;) {
+ nents = verify_one_map(dma_sg, &sg, nents, &iopte);
+ if (nents <= 0)
+ break;
+ dma_sg = sg_next(dma_sg);
+ if (dma_sg->dma_length == 0)
+ break;
+ }
+
+ if (nents > 0) {
+ printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
+ nents);
+ return -1;
+ }
+
+ if (nents < 0) {
+ printk("verify_maps: Error, messed up mappings, "
+ "at sg %d dma_sg %d\n",
+ (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int npages)
+{
+ struct scatterlist *sg;
+
+ if (verify_lengths(sglist, nents, npages) < 0 ||
+ verify_maps(sglist, nents, iopte) < 0) {
+ int i;
+
+ printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
+ printk("%016lx.\n", sglist->dma_address & IO_PAGE_MASK);
+
+ for_each_sg(sglist, sg, nents, i) {
+ printk("sg(%d): page_addr(%p) off(%x) length(%x) "
+ "dma_address[%016x] dma_length[%016x]\n",
+ i,
+ page_address(sg_page(sg)), sg->offset,
+ sg->length,
+ sg->dma_address, sg->dma_length);
+ }
+ }
+
+ /* Seems to be ok */
+}
+#endif
+
+unsigned long prepare_sg(struct device *dev, struct scatterlist *sg, int nents)
+{
+ struct scatterlist *dma_sg = sg;
+ unsigned long prev;
+ u32 dent_addr, dent_len;
+ unsigned int max_seg_size;
+
+ prev = (unsigned long) sg_virt(sg);
+ prev += (unsigned long) (dent_len = sg->length);
+ dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
+ max_seg_size = dma_get_max_seg_size(dev);
+ while (--nents) {
+ unsigned long addr;
+
+ sg = sg_next(sg);
+ addr = (unsigned long) sg_virt(sg);
+ if (! VCONTIG(prev, addr) ||
+ dent_len + sg->length > max_seg_size) {
+ dma_sg->dma_address = dent_addr;
+ dma_sg->dma_length = dent_len;
+ dma_sg = sg_next(dma_sg);
+
+ dent_addr = ((dent_addr +
+ dent_len +
+ (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
+ dent_addr <<= IO_PAGE_SHIFT;
+ dent_addr += addr & (IO_PAGE_SIZE - 1UL);
+ dent_len = 0;
+ }
+ dent_len += sg->length;
+ prev = addr + sg->length;
+ }
+ dma_sg->dma_address = dent_addr;
+ dma_sg->dma_length = dent_len;
+
+ if (dma_sg != sg) {
+ dma_sg = sg_next(dma_sg);
+ dma_sg->dma_length = 0;
+ }
+
+ return ((unsigned long) dent_addr +
+ (unsigned long) dent_len +
+ (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
+}
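prepare_sg() above folds virtually contiguous scatterlist entries into fewer DMA segments, bounded by the device's maximum segment size, and terminates the result with a zero-length entry. A hedged sketch of just the coalescing decision (struct buf and the equality-based VCONTIG() here are simplified stand-ins for the kernel's scatterlist and VCONTIG() macro, and the DMA-offset bookkeeping is omitted):

/* Userspace sketch only, not kernel code. */
#include <stdio.h>

struct buf { unsigned long vaddr; unsigned long len; };

/* Simplified contiguity test: the previous buffer ends exactly where
 * the next one starts. */
#define VCONTIG(prev_end, next_start) ((prev_end) == (next_start))

int main(void)
{
        struct buf in[] = {
                { 0x1000UL, 0x800UL },   /* contiguous with the next one   */
                { 0x1800UL, 0x800UL },
                { 0x4000UL, 0x200UL },   /* gap: starts a new DMA segment  */
        };
        unsigned long max_seg_size = 0x10000UL;
        unsigned long prev = in[0].vaddr + in[0].len;
        unsigned long seg_len = in[0].len;
        int i, nsegs = 1;

        for (i = 1; i < 3; i++) {
                if (!VCONTIG(prev, in[i].vaddr) ||
                    seg_len + in[i].len > max_seg_size) {
                        printf("segment %d: %lu bytes\n", nsegs, seg_len);
                        nsegs++;
                        seg_len = 0;
                }
                seg_len += in[i].len;
                prev = in[i].vaddr + in[i].len;
        }
        printf("segment %d: %lu bytes\n", nsegs, seg_len);
        printf("%d buffers coalesced into %d DMA segments\n", 3, nsegs);
        return 0;
}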
diff --git a/trunk/arch/sparc64/kernel/iommu_common.h b/trunk/arch/sparc64/kernel/iommu_common.h
index 4b5cafa2877a..a90d046e8024 100644
--- a/trunk/arch/sparc64/kernel/iommu_common.h
+++ b/trunk/arch/sparc64/kernel/iommu_common.h
@@ -30,32 +30,6 @@
*/
#define IOMMU_PAGE_SHIFT 13
-#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
- unsigned long slen)
-{
- unsigned long npages;
-
- npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
-
- return npages;
-}
-
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
-{
- unsigned long i, npages = 0;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
- npages += iommu_num_pages(paddr, sg->length);
- }
-
- return npages;
-}
-
/* You are _strongly_ advised to enable the following debugging code
* any time you make changes to the sg code below, run it for a while
* with filesystems mounted read-only before buying the farm... -DaveM
diff --git a/trunk/arch/sparc64/kernel/pci_sun4v.c b/trunk/arch/sparc64/kernel/pci_sun4v.c
index 61baf8dc095e..5ea2eab1ccda 100644
--- a/trunk/arch/sparc64/kernel/pci_sun4v.c
+++ b/trunk/arch/sparc64/kernel/pci_sun4v.c
@@ -365,14 +365,113 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
+
+static long fill_sg(long entry, struct device *dev,
+ struct scatterlist *sg,
+ int nused, int nelems, unsigned long prot)
+{
+ struct scatterlist *dma_sg = sg;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+
+ iommu_batch_start(dev, prot, entry);
+
+ for (i = 0; i < nused; i++) {
+ unsigned long pteval = ~0UL;
+ u32 dma_npages;
+
+ dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+ dma_sg->dma_length +
+ ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+ do {
+ unsigned long offset;
+ signed int len;
+
+ /* If we are here, we know we have at least one
+ * more page to map. So walk forward until we
+ * hit a page crossing, and begin creating new
+ * mappings from that spot.
+ */
+ for (;;) {
+ unsigned long tmp;
+
+ tmp = SG_ENT_PHYS_ADDRESS(sg);
+ len = sg->length;
+ if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = tmp & IO_PAGE_MASK;
+ offset = tmp & (IO_PAGE_SIZE - 1UL);
+ break;
+ }
+ if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+ offset = 0UL;
+ len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+ break;
+ }
+ sg = sg_next(sg);
+ nelems--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE);
+ while (len > 0) {
+ long err;
+
+ err = iommu_batch_add(pteval);
+ if (unlikely(err < 0L))
+ goto iommu_map_failed;
+
+ pteval += IO_PAGE_SIZE;
+ len -= (IO_PAGE_SIZE - offset);
+ offset = 0;
+ dma_npages--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE) + len;
+ sg = sg_next(sg);
+ nelems--;
+
+ /* Skip over any tail mappings we've fully mapped,
+ * adjusting pteval along the way. Stop when we
+ * detect a page crossing event.
+ */
+ while (nelems &&
+ (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+ (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+ ((pteval ^
+ (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+ pteval += sg->length;
+ sg = sg_next(sg);
+ nelems--;
+ }
+ if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+ pteval = ~0UL;
+ } while (dma_npages != 0);
+ dma_sg = sg_next(dma_sg);
+ }
+
+ if (unlikely(iommu_batch_end() < 0L))
+ goto iommu_map_failed;
+
+ local_irq_restore(flags);
+ return 0;
+
+iommu_map_failed:
+ local_irq_restore(flags);
+ return -1L;
+}
+
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- unsigned long flags, npages, i, prot;
- struct scatterlist *sg;
struct iommu *iommu;
- long entry, err;
+ unsigned long flags, npages, prot;
u32 dma_base;
+ struct scatterlist *sgtmp;
+ long entry, err;
+ int used;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -390,8 +489,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad;
- npages = calc_npages(sglist, nelems);
+ /* Step 1: Prepare scatter list. */
+ npages = prepare_sg(dev, sglist, nelems);
+ /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -402,45 +503,27 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
- prot = HV_PCI_MAP_ATTR_READ;
- if (direction != DMA_TO_DEVICE)
- prot |= HV_PCI_MAP_ATTR_WRITE;
-
- local_irq_save(flags);
-
- iommu_batch_start(dev, prot, entry);
-
- for_each_sg(sglist, sg, nelems, i) {
- unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
- unsigned long slen = sg->length;
- unsigned long this_npages;
+ /* Step 3: Normalize DMA addresses. */
+ used = nelems;
- this_npages = iommu_num_pages(paddr, slen);
-
- sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
- sg->dma_length = slen;
-
- paddr &= IO_PAGE_MASK;
- while (this_npages--) {
- err = iommu_batch_add(paddr);
- if (unlikely(err < 0L)) {
- local_irq_restore(flags);
- goto iommu_map_failed;
- }
-
- paddr += IO_PAGE_SIZE;
- dma_base += IO_PAGE_SIZE;
- }
+ sgtmp = sglist;
+ while (used && sgtmp->dma_length) {
+ sgtmp->dma_address += dma_base;
+ sgtmp = sg_next(sgtmp);
+ used--;
}
+ used = nelems - used;
- err = iommu_batch_end();
-
- local_irq_restore(flags);
+ /* Step 4: Create the mappings. */
+ prot = HV_PCI_MAP_ATTR_READ;
+ if (direction != DMA_TO_DEVICE)
+ prot |= HV_PCI_MAP_ATTR_WRITE;
+ err = fill_sg(entry, dev, sglist, used, nelems, prot);
if (unlikely(err < 0L))
goto iommu_map_failed;
- return nelems;
+ return used;
bad:
if (printk_ratelimit())
@@ -458,11 +541,12 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- unsigned long flags, npages;
struct pci_pbm_info *pbm;
- u32 devhandle, bus_addr;
struct iommu *iommu;
+ unsigned long flags, i, npages;
+ struct scatterlist *sg, *sgprv;
long entry;
+ u32 devhandle, bus_addr;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
@@ -474,8 +558,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
+ sgprv = NULL;
+ for_each_sg(sglist, sg, nelems, i) {
+ if (sg->dma_length == 0)
+ break;
+
+ sgprv = sg;
+ }
- npages = calc_npages(sglist, nelems);
+ npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
+ bus_addr) >> IO_PAGE_SHIFT;
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
diff --git a/trunk/arch/sparc64/kernel/smp.c b/trunk/arch/sparc64/kernel/smp.c
index a8052b76df41..c39944927f1a 100644
--- a/trunk/arch/sparc64/kernel/smp.c
+++ b/trunk/arch/sparc64/kernel/smp.c
@@ -46,6 +46,8 @@
#include
#include
+extern void calibrate_delay(void);
+
int sparc64_multi_core __read_mostly;
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
diff --git a/trunk/arch/sparc64/kernel/sparc64_ksyms.c b/trunk/arch/sparc64/kernel/sparc64_ksyms.c
index 8649635d6d74..60765e314bd8 100644
--- a/trunk/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/trunk/arch/sparc64/kernel/sparc64_ksyms.c
@@ -277,7 +277,6 @@ EXPORT_SYMBOL(sys_getpid);
EXPORT_SYMBOL(sys_geteuid);
EXPORT_SYMBOL(sys_getuid);
EXPORT_SYMBOL(sys_getegid);
-EXPORT_SYMBOL(sysctl_nr_open);
EXPORT_SYMBOL(sys_getgid);
EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
diff --git a/trunk/arch/sparc64/kernel/systbls.S b/trunk/arch/sparc64/kernel/systbls.S
index adc62f490f36..b8058906e727 100644
--- a/trunk/arch/sparc64/kernel/systbls.S
+++ b/trunk/arch/sparc64/kernel/systbls.S
@@ -80,8 +80,7 @@ sys_call_table32:
.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
-/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
- .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime
+/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_ni_syscall, sys_eventfd, compat_sys_fallocate
#endif /* CONFIG_COMPAT */
@@ -153,8 +152,7 @@ sys_call_table:
.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
- .word sys_timerfd_settime, sys_timerfd_gettime
+/*310*/ .word sys_utimensat, sys_signalfd, sys_ni_syscall, sys_eventfd, sys_fallocate
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -273,7 +271,6 @@ sunos_sys_table:
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys
/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys
+ .word sunos_nosys, sunos_nosys
#endif
diff --git a/trunk/arch/sparc64/kernel/time.c b/trunk/arch/sparc64/kernel/time.c
index d204f1ab1d4c..4352ee4d8dac 100644
--- a/trunk/arch/sparc64/kernel/time.c
+++ b/trunk/arch/sparc64/kernel/time.c
@@ -1707,11 +1707,6 @@ static void __exit rtc_mini_exit(void)
misc_deregister(&rtc_mini_dev);
}
-int __devinit read_current_timer(unsigned long *timer_val)
-{
- *timer_val = tick_ops->get_tick();
- return 0;
-}
module_init(rtc_mini_init);
module_exit(rtc_mini_exit);
diff --git a/trunk/arch/sparc64/mm/init.c b/trunk/arch/sparc64/mm/init.c
index e726c45645ff..523e993ee90c 100644
--- a/trunk/arch/sparc64/mm/init.c
+++ b/trunk/arch/sparc64/mm/init.c
@@ -997,7 +997,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
initrd_start, initrd_end);
#endif
- reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
+ reserve_bootmem(initrd_start, size);
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
@@ -1007,7 +1007,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
#ifdef CONFIG_DEBUG_BOOTMEM
prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
- reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(kern_base, kern_size);
*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
/* Add back in the initmem pages. */
@@ -1024,7 +1024,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
(bootmap_pfn << PAGE_SHIFT), size);
#endif
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
for (i = 0; i < pavail_ents; i++) {
unsigned long start_pfn, end_pfn;
@@ -1489,7 +1489,7 @@ static void __init taint_real_pages(void)
goto do_next_page;
}
}
- reserve_bootmem(old_start, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(old_start, PAGE_SIZE);
do_next_page:
old_start += PAGE_SIZE;
diff --git a/trunk/arch/sparc64/solaris/fs.c b/trunk/arch/sparc64/solaris/fs.c
index 9311bfe4f2f7..61be597bf430 100644
--- a/trunk/arch/sparc64/solaris/fs.c
+++ b/trunk/arch/sparc64/solaris/fs.c
@@ -624,7 +624,7 @@ asmlinkage int solaris_ulimit(int cmd, int val)
case 3: /* UL_GMEMLIM */
return current->signal->rlim[RLIMIT_DATA].rlim_cur;
case 4: /* UL_GDESLIM */
- return sysctl_nr_open;
+ return NR_OPEN;
}
return -EINVAL;
}
diff --git a/trunk/arch/sparc64/solaris/timod.c b/trunk/arch/sparc64/solaris/timod.c
index f53123c02c2b..a9d32ceabf26 100644
--- a/trunk/arch/sparc64/solaris/timod.c
+++ b/trunk/arch/sparc64/solaris/timod.c
@@ -859,8 +859,7 @@ asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if (fd >= sysctl_nr_open)
- goto out;
+ if(fd >= NR_OPEN) goto out;
fdt = files_fdtable(current->files);
filp = fdt->fd[fd];
@@ -928,8 +927,7 @@ asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if (fd >= sysctl_nr_open)
- goto out;
+ if(fd >= NR_OPEN) goto out;
fdt = files_fdtable(current->files);
filp = fdt->fd[fd];
diff --git a/trunk/arch/v850/kernel/anna.c b/trunk/arch/v850/kernel/anna.c
index 5978a25170fb..0e429041a117 100644
--- a/trunk/arch/v850/kernel/anna.c
+++ b/trunk/arch/v850/kernel/anna.c
@@ -85,8 +85,7 @@ void __init mach_reserve_bootmem ()
/* The space between SRAM and SDRAM is filled with duplicate
images of SRAM. Prevent the kernel from using them. */
reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
- SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
- BOOTMEM_DEFAULT);
+ SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE));
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/trunk/arch/v850/kernel/as85ep1.c b/trunk/arch/v850/kernel/as85ep1.c
index b525ecf3aea4..18437bc5c3ad 100644
--- a/trunk/arch/v850/kernel/as85ep1.c
+++ b/trunk/arch/v850/kernel/as85ep1.c
@@ -116,8 +116,7 @@ void __init mach_reserve_bootmem ()
if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START)
/* We can't use the space between SRAM and SDRAM, so
prevent the kernel from trying. */
- reserve_bootmem(SRAM_END, SDRAM_ADDR - SRAM_END,
- BOOTMEM_DEFAULT);
+ reserve_bootmem (SRAM_END, SDRAM_ADDR - SRAM_END);
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/trunk/arch/v850/kernel/rte_ma1_cb.c b/trunk/arch/v850/kernel/rte_ma1_cb.c
index 08abf3d5f8df..9a716f946421 100644
--- a/trunk/arch/v850/kernel/rte_ma1_cb.c
+++ b/trunk/arch/v850/kernel/rte_ma1_cb.c
@@ -46,15 +46,13 @@ void __init mach_reserve_bootmem ()
{
#ifdef CONFIG_RTE_CB_MULTI
/* Prevent the kernel from touching the monitor's scratch RAM. */
- reserve_bootmem(MON_SCRATCH_ADDR, MON_SCRATCH_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem (MON_SCRATCH_ADDR, MON_SCRATCH_SIZE);
#endif
/* The space between SRAM and SDRAM is filled with duplicate
images of SRAM. Prevent the kernel from using them. */
reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
- SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
- BOOTMEM_DEFAULT);
+ SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE));
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/trunk/arch/v850/kernel/setup.c b/trunk/arch/v850/kernel/setup.c
index a0a8456a8430..a914f244f494 100644
--- a/trunk/arch/v850/kernel/setup.c
+++ b/trunk/arch/v850/kernel/setup.c
@@ -241,18 +241,15 @@ init_bootmem_alloc (unsigned long ram_start, unsigned long ram_len)
if (kram_end > kram_start)
/* Reserve the RAM part of the kernel's address space, so it
doesn't get allocated. */
- reserve_bootmem(kram_start, kram_end - kram_start,
- BOOTMEM_DEFAULT);
+ reserve_bootmem (kram_start, kram_end - kram_start);
if (intv_in_ram && !intv_in_kram)
/* Reserve the interrupt vector space. */
- reserve_bootmem(intv_start, intv_end - intv_start,
- BOOTMEM_DEFAULT);
+ reserve_bootmem (intv_start, intv_end - intv_start);
if (bootmap >= ram_start && bootmap < ram_end)
/* Reserve the bootmap space. */
- reserve_bootmem(bootmap, bootmap_len,
- BOOTMEM_DEFAULT);
+ reserve_bootmem (bootmap, bootmap_len);
/* Reserve the memory used by the root filesystem image if it's
in RAM. */
@@ -260,8 +257,7 @@ init_bootmem_alloc (unsigned long ram_start, unsigned long ram_len)
&& (unsigned long)&_root_fs_image_start >= ram_start
&& (unsigned long)&_root_fs_image_start < ram_end)
reserve_bootmem ((unsigned long)&_root_fs_image_start,
- &_root_fs_image_end - &_root_fs_image_start,
- BOOTMEM_DEFAULT);
+ &_root_fs_image_end - &_root_fs_image_start);
/* Let the platform-dependent code reserve some too. */
if (mrb)
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 923c3babd667..434821187cfc 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -415,7 +415,7 @@ config HPET_TIMER
config HPET_EMULATE_RTC
def_bool y
- depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
+ depends on HPET_TIMER && (RTC=y || RTC=m)
# Mark as embedded because too many people got it wrong.
# The code disables itself when not needed.
@@ -631,6 +631,7 @@ config TOSHIBA
config I8K
tristate "Dell laptop support"
+ depends on X86_32
---help---
This adds a driver to safely access the System Management Mode
of the CPU on the Dell Inspiron 8000. The System Management Mode
diff --git a/trunk/arch/x86/Kconfig.debug b/trunk/arch/x86/Kconfig.debug
index fa555148823d..2e1e3af28c3a 100644
--- a/trunk/arch/x86/Kconfig.debug
+++ b/trunk/arch/x86/Kconfig.debug
@@ -220,9 +220,9 @@ config DEBUG_BOOT_PARAMS
This option will cause struct boot_params to be exported via debugfs.
config CPA_DEBUG
- bool "CPA self-test code"
+ bool "CPA self test code"
depends on DEBUG_KERNEL
help
- Do change_page_attr() self-tests every 30 seconds.
+ Do change_page_attr self tests at boot.
endmenu
diff --git a/trunk/arch/x86/ia32/ia32_aout.c b/trunk/arch/x86/ia32/ia32_aout.c
index 58cccb6483b0..e4c12079171b 100644
--- a/trunk/arch/x86/ia32/ia32_aout.c
+++ b/trunk/arch/x86/ia32/ia32_aout.c
@@ -172,7 +172,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
has_dumped = 1;
current->flags |= PF_DUMPCORE;
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
- dump.u_ar0 = offsetof(struct user32, regs);
+ dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) -
+ ((unsigned long)(&dump)));
dump.signal = signr;
dump_thread32(regs, &dump);
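The u_ar0 change above replaces offsetof(struct user32, regs) with an explicit pointer difference on the local dump; both forms compute the offset of the register block within the dump image. A hedged standalone check (struct user_dump is an invented layout, not the real a.out core header):

/* Userspace sketch only, not kernel code. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct regs32 { uint32_t r[17]; };

struct user_dump {
        uint32_t u_comm[4];
        uint32_t u_ar0;
        uint32_t signal;
        struct regs32 regs;
};

int main(void)
{
        struct user_dump dump;
        size_t via_offsetof = offsetof(struct user_dump, regs);
        size_t via_pointers = (size_t)((unsigned long)&dump.regs -
                                       (unsigned long)&dump);

        /* Both computations yield the same byte offset. */
        printf("offsetof: %zu, pointer difference: %zu\n",
               via_offsetof, via_pointers);
        return 0;
}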
diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c
index f86a3c4a2669..d9313d9adced 100644
--- a/trunk/arch/x86/kernel/cpu/common.c
+++ b/trunk/arch/x86/kernel/cpu/common.c
@@ -637,7 +637,7 @@ void __init early_cpu_init(void)
}
/* Make sure %fs is initialized properly in idle threads */
-struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5affe91ca1e5..a0522735dd9d 100644
--- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -827,6 +827,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
for (i = 0; i < data->acpi_data.state_count; i++) {
u32 index;
+ u32 hi = 0, lo = 0;
index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
if (index > data->max_hw_pstate) {
diff --git a/trunk/arch/x86/kernel/cpu/cyrix.c b/trunk/arch/x86/kernel/cpu/cyrix.c
index 7139b0262703..404a6a2d4016 100644
--- a/trunk/arch/x86/kernel/cpu/cyrix.c
+++ b/trunk/arch/x86/kernel/cpu/cyrix.c
@@ -83,6 +83,8 @@ static char cyrix_model_mult2[] __cpuinitdata = "12233445";
* FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
*/
+extern void calibrate_delay(void) __init;
+
static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
unsigned long flags;
diff --git a/trunk/arch/x86/kernel/cpu/mtrr/main.c b/trunk/arch/x86/kernel/cpu/mtrr/main.c
index b6e136f23d3d..1e27b69a7a0e 100644
--- a/trunk/arch/x86/kernel/cpu/mtrr/main.c
+++ b/trunk/arch/x86/kernel/cpu/mtrr/main.c
@@ -659,7 +659,7 @@ static __init int amd_special_default_mtrr(void)
*/
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
- unsigned long i, base, size, highest_pfn = 0, def, dummy;
+ unsigned long i, base, size, highest_addr = 0, def, dummy;
mtrr_type type;
u64 trim_start, trim_size;
@@ -682,27 +682,28 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
mtrr_if->get(i, &base, &size, &type);
if (type != MTRR_TYPE_WRBACK)
continue;
- if (highest_pfn < base + size)
- highest_pfn = base + size;
+ base <<= PAGE_SHIFT;
+ size <<= PAGE_SHIFT;
+ if (highest_addr < base + size)
+ highest_addr = base + size;
}
/* kvm/qemu doesn't have mtrr set right, don't trim them all */
- if (!highest_pfn) {
+ if (!highest_addr) {
printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
WARN_ON(1);
return 0;
}
- if (highest_pfn < end_pfn) {
+ if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
- " all of memory, losing %luMB of RAM.\n",
- (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT));
+ " all of memory, losing %LdMB of RAM.\n",
+ (((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
WARN_ON(1);
printk(KERN_INFO "update e820 for mtrr\n");
- trim_start = highest_pfn;
- trim_start <<= PAGE_SHIFT;
+ trim_start = highest_addr;
trim_size = end_pfn;
trim_size <<= PAGE_SHIFT;
trim_size -= trim_start;
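The MTRR-trim hunk above switches the bookkeeping from page frame numbers back to byte addresses: each write-back range's base and size are shifted by PAGE_SHIFT before the highest covered address is compared against end_pfn. A hedged sketch of that comparison with invented sample ranges (not a real MTRR dump):

/* Userspace sketch only, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT       12
#define MTRR_TYPE_WRBACK 6

struct var_mtrr { unsigned long base_pfn, size_pfn; int type; };

int main(void)
{
        struct var_mtrr ranges[] = {
                { 0x00000UL, 0x80000UL, MTRR_TYPE_WRBACK },  /* 0-2GB write-back */
                { 0x80000UL, 0x40000UL, MTRR_TYPE_WRBACK },  /* 2-3GB write-back */
                { 0xC0000UL, 0x40000UL, 0 },                 /* 3-4GB uncached   */
        };
        unsigned long long end_pfn = 0x100000ULL;            /* 4GB of RAM       */
        unsigned long long highest_addr = 0;
        int i;

        for (i = 0; i < 3; i++) {
                unsigned long long base, size;

                if (ranges[i].type != MTRR_TYPE_WRBACK)
                        continue;
                base = (unsigned long long)ranges[i].base_pfn << PAGE_SHIFT;
                size = (unsigned long long)ranges[i].size_pfn << PAGE_SHIFT;
                if (highest_addr < base + size)
                        highest_addr = base + size;
        }

        if ((highest_addr >> PAGE_SHIFT) < end_pfn)
                printf("MTRRs don't cover all of memory, losing %lluMB of RAM\n",
                       ((end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
        else
                printf("all of memory is covered by write-back MTRRs\n");
        return 0;
}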
diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S
index c7341e81941c..bea8474744ff 100644
--- a/trunk/arch/x86/kernel/entry_64.S
+++ b/trunk/arch/x86/kernel/entry_64.S
@@ -582,6 +582,7 @@ retint_restore_args: /* return to kernel space */
TRACE_IRQS_IRETQ
restore_args:
RESTORE_ARGS 0,8,0
+iret_label:
#ifdef CONFIG_PARAVIRT
INTERRUPT_RETURN
#endif
@@ -592,22 +593,13 @@ ENTRY(native_iret)
.quad native_iret, bad_iret
.previous
.section .fixup,"ax"
+ /* force a signal here? this matches i386 behaviour */
+ /* running with kernel gs */
bad_iret:
- /*
- * The iret traps when the %cs or %ss being restored is bogus.
- * We've lost the original trap vector and error code.
- * #GPF is the most likely one to get for an invalid selector.
- * So pretend we completed the iret and took the #GPF in user mode.
- *
- * We are now running with the kernel GS after exception recovery.
- * But error_entry expects us to have user GS to match the user %cs,
- * so swap back.
- */
- pushq $0
-
- SWAPGS
- jmp general_protection
-
+ movq $11,%rdi /* SIGSEGV */
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+ jmp do_exit
.previous
/* edi: workmask, edx: work */
@@ -919,7 +911,7 @@ error_kernelspace:
iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report an truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */
- leaq native_iret(%rip),%rbp
+ leaq iret_label(%rip),%rbp
cmpq %rbp,RIP(%rsp)
je error_swapgs
movl %ebp,%ebp /* zero extend */
diff --git a/trunk/arch/x86/kernel/head_64.S b/trunk/arch/x86/kernel/head_64.S
index 09b38d539b09..4f283ad215ec 100644
--- a/trunk/arch/x86/kernel/head_64.S
+++ b/trunk/arch/x86/kernel/head_64.S
@@ -250,13 +250,18 @@ ENTRY(secondary_startup_64)
lretq
/* SMP bootup changes these two */
- __CPUINITDATA
+#ifndef CONFIG_HOTPLUG_CPU
+ .pushsection .init.data
+#endif
.align 8
- ENTRY(initial_code)
+ .globl initial_code
+initial_code:
.quad x86_64_start_kernel
- __FINITDATA
-
- ENTRY(init_rsp)
+#ifndef CONFIG_HOTPLUG_CPU
+ .popsection
+#endif
+ .globl init_rsp
+init_rsp:
.quad init_thread_union+THREAD_SIZE-8
bad_address:
diff --git a/trunk/arch/x86/kernel/machine_kexec_32.c b/trunk/arch/x86/kernel/machine_kexec_32.c
index d0b234c9fc31..c1cfd60639d4 100644
--- a/trunk/arch/x86/kernel/machine_kexec_32.c
+++ b/trunk/arch/x86/kernel/machine_kexec_32.c
@@ -151,7 +151,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
diff --git a/trunk/arch/x86/kernel/machine_kexec_64.c b/trunk/arch/x86/kernel/machine_kexec_64.c
index 236d2f8f7ddc..a1fef42f8cdb 100644
--- a/trunk/arch/x86/kernel/machine_kexec_64.c
+++ b/trunk/arch/x86/kernel/machine_kexec_64.c
@@ -234,10 +234,5 @@ NORET_TYPE void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_SYMBOL(init_level4_pgt);
-
-#ifdef CONFIG_NUMA
- VMCOREINFO_SYMBOL(node_data);
- VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
-#endif
}
diff --git a/trunk/arch/x86/kernel/mpparse_32.c b/trunk/arch/x86/kernel/mpparse_32.c
index f349e68e45a0..67009cdd5eca 100644
--- a/trunk/arch/x86/kernel/mpparse_32.c
+++ b/trunk/arch/x86/kernel/mpparse_32.c
@@ -736,8 +736,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
smp_found_config = 1;
printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
mpf, virt_to_phys(mpf));
- reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
if (mpf->mpf_physptr) {
/*
* We cannot access to MPC table to compute
@@ -752,8 +751,7 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->mpf_physptr + size > end)
size = end - mpf->mpf_physptr;
- reserve_bootmem(mpf->mpf_physptr, size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(mpf->mpf_physptr, size);
}
mpf_found = mpf;
diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c
index 702c33efea84..96286df1bb81 100644
--- a/trunk/arch/x86/kernel/ptrace.c
+++ b/trunk/arch/x86/kernel/ptrace.c
@@ -103,26 +103,9 @@ static int set_segment_reg(struct task_struct *task,
if (invalid_selector(value))
return -EIO;
- /*
- * For %cs and %ss we cannot permit a null selector.
- * We can permit a bogus selector as long as it has USER_RPL.
- * Null selectors are fine for other segment registers, but
- * we will never get back to user mode with invalid %cs or %ss
- * and will take the trap in iret instead. Much code relies
- * on user_mode() to distinguish a user trap frame (which can
- * safely use invalid selectors) from a kernel trap frame.
- */
- switch (offset) {
- case offsetof(struct user_regs_struct, cs):
- case offsetof(struct user_regs_struct, ss):
- if (unlikely(value == 0))
- return -EIO;
-
- default:
+ if (offset != offsetof(struct user_regs_struct, gs))
*pt_regs_access(task_pt_regs(task), offset) = value;
- break;
-
- case offsetof(struct user_regs_struct, gs):
+ else {
task->thread.gs = value;
if (task == current)
/*
@@ -244,16 +227,12 @@ static int set_segment_reg(struct task_struct *task,
* Can't actually change these in 64-bit mode.
*/
case offsetof(struct user_regs_struct,cs):
- if (unlikely(value == 0))
- return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->cs = value;
#endif
break;
case offsetof(struct user_regs_struct,ss):
- if (unlikely(value == 0))
- return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->ss = value;
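The lines removed above enforced that ptrace may store a bogus selector into most segment registers but never a null %cs or %ss, since the eventual iret would trap. A hedged toy restatement of that rule (the enum, offsets and return behaviour are illustrative only, not the real user_regs_struct handling):

/* Userspace sketch only, not kernel code. */
#include <stdio.h>
#include <errno.h>

enum seg { SEG_CS, SEG_SS, SEG_DS, SEG_ES, SEG_FS, SEG_GS };

static int set_segment_reg(enum seg which, unsigned int value)
{
        switch (which) {
        case SEG_CS:
        case SEG_SS:
                /* Returning to user mode with a null %cs or %ss would
                 * fault in iret, so refuse it up front. */
                if (value == 0)
                        return -EIO;
                break;
        default:
                break;
        }
        printf("segment %d <- %#x accepted\n", which, value);
        return 0;
}

int main(void)
{
        set_segment_reg(SEG_DS, 0);     /* fine: null selector allowed */
        set_segment_reg(SEG_CS, 0);     /* rejected with -EIO          */
        set_segment_reg(SEG_CS, 0x33);  /* fine: non-null selector     */
        return 0;
}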
diff --git a/trunk/arch/x86/kernel/quirks.c b/trunk/arch/x86/kernel/quirks.c
index 6ba33ca8715a..3cd7a2dcd4fe 100644
--- a/trunk/arch/x86/kernel/quirks.c
+++ b/trunk/arch/x86/kernel/quirks.c
@@ -380,19 +380,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
void force_hpet_resume(void)
{
switch (force_hpet_resume_type) {
- case ICH_FORCE_HPET_RESUME:
- ich_force_hpet_resume();
- return;
- case OLD_ICH_FORCE_HPET_RESUME:
- old_ich_force_hpet_resume();
- return;
- case VT8237_FORCE_HPET_RESUME:
- vt8237_force_hpet_resume();
- return;
- case NVIDIA_FORCE_HPET_RESUME:
- nvidia_force_hpet_resume();
- return;
- default:
+ case ICH_FORCE_HPET_RESUME:
+ return ich_force_hpet_resume();
+
+ case OLD_ICH_FORCE_HPET_RESUME:
+ return old_ich_force_hpet_resume();
+
+ case VT8237_FORCE_HPET_RESUME:
+ return vt8237_force_hpet_resume();
+
+ case NVIDIA_FORCE_HPET_RESUME:
+ return nvidia_force_hpet_resume();
+
+ default:
break;
}
}
diff --git a/trunk/arch/x86/kernel/setup_32.c b/trunk/arch/x86/kernel/setup_32.c
index d1d8c347cc0b..62adc5f20be5 100644
--- a/trunk/arch/x86/kernel/setup_32.c
+++ b/trunk/arch/x86/kernel/setup_32.c
@@ -390,7 +390,7 @@ static void __init reserve_ebda_region(void)
unsigned int addr;
addr = get_bios_ebda();
if (addr)
- reserve_bootmem(addr, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(addr, PAGE_SIZE);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -484,8 +484,7 @@ static void __init reserve_crashkernel(void)
(unsigned long)(total_mem >> 20));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
- reserve_bootmem(crash_base, crash_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem(crash_base, crash_size);
} else
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
@@ -526,7 +525,7 @@ static void __init reserve_initrd(void)
}
if (ramdisk_end <= end_of_lowmem) {
/* All in lowmem, easy case */
- reserve_bootmem(ramdisk_image, ramdisk_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(ramdisk_image, ramdisk_size);
initrd_start = ramdisk_image + PAGE_OFFSET;
initrd_end = initrd_start+ramdisk_size;
return;
@@ -537,7 +536,7 @@ static void __init reserve_initrd(void)
/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- reserve_bootmem(ramdisk_here, ramdisk_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(ramdisk_here, ramdisk_size);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
@@ -607,14 +606,13 @@ void __init setup_bootmem_allocator(void)
* bootmem allocator with an invalid RAM area.
*/
reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
- bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text),
- BOOTMEM_DEFAULT);
+ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(0, PAGE_SIZE);
/* reserve EBDA region, it's a 4K region */
reserve_ebda_region();
@@ -624,7 +622,7 @@ void __init setup_bootmem_allocator(void)
unless you have no PS/2 mouse plugged in. */
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 6)
- reserve_bootmem(0xa0000 - 4096, 4096, BOOTMEM_DEFAULT);
+ reserve_bootmem(0xa0000 - 4096, 4096);
#ifdef CONFIG_SMP
/*
@@ -632,7 +630,7 @@ void __init setup_bootmem_allocator(void)
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_bootmem(PAGE_SIZE, PAGE_SIZE, BOOTMEM_DEFAULT);
+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_ACPI_SLEEP
/*
diff --git a/trunk/arch/x86/kernel/setup_64.c b/trunk/arch/x86/kernel/setup_64.c
index a49f5f734a5e..c8939dfddfba 100644
--- a/trunk/arch/x86/kernel/setup_64.c
+++ b/trunk/arch/x86/kernel/setup_64.c
@@ -189,7 +189,7 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
e820_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
- reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+ reserve_bootmem(bootmap, bootmap_size);
}
#endif
@@ -220,35 +220,28 @@ static inline void copy_edd(void)
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
- unsigned long long total_mem;
+ unsigned long long free_mem;
unsigned long long crash_size, crash_base;
int ret;
- total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
+ free_mem =
+ ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
- ret = parse_crashkernel(boot_command_line, total_mem,
+ ret = parse_crashkernel(boot_command_line, free_mem,
&crash_size, &crash_base);
if (ret == 0 && crash_size) {
- if (crash_base <= 0) {
+ if (crash_base > 0) {
+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(free_mem >> 20));
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ reserve_bootmem(crash_base, crash_size);
+ } else
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
- return;
- }
-
- if (reserve_bootmem(crash_base, crash_size,
- BOOTMEM_EXCLUSIVE) < 0) {
- printk(KERN_INFO "crashkernel reservation failed - "
- "memory is in use\n");
- return;
- }
-
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(total_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
}
}
#else
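
Both reserve_crashkernel() variants follow the same shape: parse the crashkernel= boot argument into a size and a base, then reserve the range only when a base address was actually supplied. A toy userspace sketch of that flow, assuming a simplified "size@base" syntax and a hypothetical parse_crashkernel_arg() helper rather than the kernel's parse_crashkernel():

    /*
     * Toy sketch of the crashkernel reservation flow; the syntax handled
     * here ("<size>@<base>", plain numbers) and parse_crashkernel_arg()
     * are simplifications, not the kernel's parse_crashkernel().
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_crashkernel_arg(const char *arg,
                                     unsigned long long *size,
                                     unsigned long long *base)
    {
        char *at;

        *size = strtoull(arg, &at, 0);
        *base = (*at == '@') ? strtoull(at + 1, NULL, 0) : 0;
        return *size ? 0 : -1;
    }

    int main(void)
    {
        unsigned long long size, base;

        if (parse_crashkernel_arg("0x4000000@0x1000000", &size, &base) == 0) {
            if (base > 0)
                printf("Reserving %lluMB of memory at %lluMB for crashkernel\n",
                       size >> 20, base >> 20);
            else
                printf("crashkernel reservation failed - "
                       "you have to specify a base address\n");
        }
        return 0;
    }
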
diff --git a/trunk/arch/x86/kernel/smpboot_32.c b/trunk/arch/x86/kernel/smpboot_32.c
index 579b9b740c7c..5787a0c3e296 100644
--- a/trunk/arch/x86/kernel/smpboot_32.c
+++ b/trunk/arch/x86/kernel/smpboot_32.c
@@ -202,6 +202,8 @@ void __cpuinit smp_store_cpu_info(int id)
;
}
+extern void calibrate_delay(void);
+
static atomic_t init_deasserted;
static void __cpuinit smp_callin(void)
diff --git a/trunk/arch/x86/kernel/test_nx.c b/trunk/arch/x86/kernel/test_nx.c
index 10b8a6f69f84..36c100c323aa 100644
--- a/trunk/arch/x86/kernel/test_nx.c
+++ b/trunk/arch/x86/kernel/test_nx.c
@@ -139,6 +139,7 @@ static int test_NX(void)
* Until then, don't run them to avoid too many people getting scared
* by the error message
*/
+#if 0
#ifdef CONFIG_DEBUG_RODATA
/* Test 3: Check if the .rodata section is executable */
@@ -151,7 +152,6 @@ static int test_NX(void)
}
#endif
-#if 0
/* Test 4: Check if the .data section of a module is executable */
if (test_address(&test_data)) {
printk(KERN_ERR "test_nx: .data section is executable\n");
diff --git a/trunk/arch/x86/kernel/traps_32.c b/trunk/arch/x86/kernel/traps_32.c
index b22c01e05a18..3cf72977d012 100644
--- a/trunk/arch/x86/kernel/traps_32.c
+++ b/trunk/arch/x86/kernel/traps_32.c
@@ -1176,12 +1176,17 @@ void __init trap_init(void)
#endif
set_trap_gate(19,&simd_coprocessor_error);
- /*
- * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
- * Generate a build-time error if the alignment is wrong.
- */
- BUILD_BUG_ON(offsetof(struct task_struct, thread.i387.fxsave) & 15);
if (cpu_has_fxsr) {
+ /*
+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+ * Generates a compile-time "error: zero width for bit-field" if
+ * the alignment is wrong.
+ */
+ struct fxsrAlignAssert {
+ int _:!(offsetof(struct task_struct,
+ thread.i387.fxsave) & 15);
+ };
+
printk(KERN_INFO "Enabling fast FPU save and restore... ");
set_in_cr4(X86_CR4_OSFXSR);
printk("done.\n");
diff --git a/trunk/arch/x86/lib/delay_32.c b/trunk/arch/x86/lib/delay_32.c
index 4535e6d147ad..aad9d95469dc 100644
--- a/trunk/arch/x86/lib/delay_32.c
+++ b/trunk/arch/x86/lib/delay_32.c
@@ -12,10 +12,8 @@
#include
#include
-#include
#include
#include
-#include
#include
#include
@@ -65,7 +63,7 @@ void use_tsc_delay(void)
delay_fn = delay_tsc;
}
-int __devinit read_current_timer(unsigned long *timer_val)
+int read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
rdtscl(*timer_val);
diff --git a/trunk/arch/x86/lib/delay_64.c b/trunk/arch/x86/lib/delay_64.c
index bbc610518516..45cdd3fbd91c 100644
--- a/trunk/arch/x86/lib/delay_64.c
+++ b/trunk/arch/x86/lib/delay_64.c
@@ -10,10 +10,8 @@
#include
#include
-#include
#include
#include
-#include
#include
#include
@@ -22,7 +20,7 @@
#include
#endif
-int __devinit read_current_timer(unsigned long *timer_value)
+int read_current_timer(unsigned long *timer_value)
{
rdtscll(*timer_value);
return 0;
diff --git a/trunk/arch/x86/mach-voyager/voyager_smp.c b/trunk/arch/x86/mach-voyager/voyager_smp.c
index 3cc8eb2f36a9..dffa786f61fe 100644
--- a/trunk/arch/x86/mach-voyager/voyager_smp.c
+++ b/trunk/arch/x86/mach-voyager/voyager_smp.c
@@ -444,6 +444,8 @@ static __u32 __init setup_trampoline(void)
static void __init start_secondary(void *unused)
{
__u8 cpuid = hard_smp_processor_id();
+ /* external functions not defined in the headers */
+ extern void calibrate_delay(void);
cpu_init();
diff --git a/trunk/arch/x86/mm/discontig_32.c b/trunk/arch/x86/mm/discontig_32.c
index c394ca0720b8..04b1d20e2613 100644
--- a/trunk/arch/x86/mm/discontig_32.c
+++ b/trunk/arch/x86/mm/discontig_32.c
@@ -391,8 +391,7 @@ unsigned long __init setup_memory(void)
void __init numa_kva_reserve(void)
{
if (kva_pages)
- reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages),
- BOOTMEM_DEFAULT);
+ reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages));
}
void __init zone_sizes_init(void)
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 621afb6343dc..ad8b9733d6b3 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -428,16 +428,6 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
}
#endif
-static int spurious_fault_check(unsigned long error_code, pte_t *pte)
-{
- if ((error_code & PF_WRITE) && !pte_write(*pte))
- return 0;
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
- return 0;
-
- return 1;
-}
-
/*
* Handle a spurious fault caused by a stale TLB entry. This allows
* us to lazily refresh the TLB when increasing the permissions of a
@@ -467,21 +457,20 @@ static int spurious_fault(unsigned long address,
if (!pud_present(*pud))
return 0;
- if (pud_large(*pud))
- return spurious_fault_check(error_code, (pte_t *) pud);
-
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
- if (pmd_large(*pmd))
- return spurious_fault_check(error_code, (pte_t *) pmd);
-
pte = pte_offset_kernel(pmd, address);
if (!pte_present(*pte))
return 0;
- return spurious_fault_check(error_code, pte);
+ if ((error_code & PF_WRITE) && !pte_write(*pte))
+ return 0;
+ if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ return 0;
+
+ return 1;
}
/*
@@ -958,12 +947,11 @@ void vmalloc_sync_all(void)
for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
if (!test_bit(pgd_index(address), insync)) {
const pgd_t *pgd_ref = pgd_offset_k(address);
- unsigned long flags;
struct page *page;
if (pgd_none(*pgd_ref))
continue;
- spin_lock_irqsave(&pgd_lock, flags);
+ spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -972,7 +960,7 @@ void vmalloc_sync_all(void)
else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
}
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock(&pgd_lock);
set_bit(pgd_index(address), insync);
}
if (address == start)
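
The spurious_fault() change above folds the permission test back inline: a fault can only be dismissed as a stale-TLB artifact if the present PTE already grants the attempted access. A minimal sketch of that test, with illustrative bit values and a hypothetical fault_is_spurious() helper rather than the real x86 error-code and PTE layout:

    /*
     * Minimal sketch of the permission test: a fault is only "spurious"
     * (a stale TLB entry) if the current PTE already allows the access.
     * The bit values and fault_is_spurious() are illustrative only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PF_WRITE 0x02   /* error code: the access was a write */
    #define PF_INSTR 0x10   /* error code: the access was an instruction fetch */
    #define PTE_RW   0x02   /* pte: page is writable */
    #define PTE_NX   0x08   /* pte: page is not executable */

    static bool fault_is_spurious(unsigned long error_code, unsigned long pte)
    {
        if ((error_code & PF_WRITE) && !(pte & PTE_RW))
            return false;   /* write hit a read-only mapping: real fault */
        if ((error_code & PF_INSTR) && (pte & PTE_NX))
            return false;   /* fetch hit a no-exec mapping: real fault */
        return true;        /* permissions already allow it: stale TLB */
    }

    int main(void)
    {
        printf("%d\n", fault_is_spurious(PF_WRITE, PTE_RW)); /* 1: spurious */
        printf("%d\n", fault_is_spurious(PF_WRITE, 0));      /* 0: real fault */
        return 0;
    }
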
diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c
index 5fe880fc305d..3a98d6f724ab 100644
--- a/trunk/arch/x86/mm/init_64.c
+++ b/trunk/arch/x86/mm/init_64.c
@@ -591,17 +591,10 @@ void mark_rodata_ro(void)
if (end <= start)
return;
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
- /*
- * The rodata section (but not the kernel text!) should also be
- * not-executable.
- */
- start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
- set_memory_nx(start, (end - start) >> PAGE_SHIFT);
rodata_test();
@@ -644,9 +637,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
- reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
- reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
+ reserve_bootmem(phys, len);
#endif
if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
dma_reserve += len / PAGE_SIZE;
diff --git a/trunk/arch/x86/mm/numa_64.c b/trunk/arch/x86/mm/numa_64.c
index 1aecc658cd7d..5a02bf4c91ec 100644
--- a/trunk/arch/x86/mm/numa_64.c
+++ b/trunk/arch/x86/mm/numa_64.c
@@ -238,10 +238,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
free_bootmem_with_active_regions(nodeid, end);
- reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size,
- BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
- bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+ bootmap_pages<<PAGE_SHIFT);
diff --git a/trunk/arch/x86/mm/pageattr-test.c b/trunk/arch/x86/mm/pageattr-test.c
--- a/trunk/arch/x86/mm/pageattr-test.c
+++ b/trunk/arch/x86/mm/pageattr-test.c
-#include <linux/kthread.h>
#include
#include
#include
@@ -15,13 +14,8 @@
#include
#include
-/*
- * Only print the results of the first pass:
- */
-static __read_mostly int print = 1;
-
enum {
- NTEST = 400,
+ NTEST = 4000,
#ifdef CONFIG_X86_64
LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
@@ -37,7 +31,7 @@ struct split_state {
long min_exec, max_exec;
};
-static int print_split(struct split_state *s)
+static __init int print_split(struct split_state *s)
{
long i, expected, missed = 0;
int printed = 0;
@@ -88,13 +82,10 @@ static int print_split(struct split_state *s)
s->max_exec = addr;
}
}
- if (print) {
- printk(KERN_INFO
- " 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
- s->spg, s->lpg, s->gpg, s->exec,
- s->min_exec != ~0UL ? s->min_exec : 0,
- s->max_exec, missed);
- }
+ printk(KERN_INFO
+ "CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
+ s->spg, s->lpg, s->gpg, s->exec,
+ s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
if (expected != i) {
@@ -105,11 +96,11 @@ static int print_split(struct split_state *s)
return err;
}
-static unsigned long addr[NTEST];
-static unsigned int len[NTEST];
+static unsigned long __initdata addr[NTEST];
+static unsigned int __initdata len[NTEST];
/* Change the global bit on random pages in the direct mapping */
-static int pageattr_test(void)
+static __init int exercise_pageattr(void)
{
struct split_state sa, sb, sc;
unsigned long *bm;
@@ -119,8 +110,7 @@ static int pageattr_test(void)
int i, k;
int err;
- if (print)
- printk(KERN_INFO "CPA self-test:\n");
+ printk(KERN_INFO "CPA exercising pageattr\n");
bm = vmalloc((max_pfn_mapped + 7) / 8);
if (!bm) {
@@ -196,6 +186,7 @@ static int pageattr_test(void)
failed += print_split(&sb);
+ printk(KERN_INFO "CPA reverting everything\n");
for (i = 0; i < NTEST; i++) {
if (!addr[i])
continue;
@@ -223,40 +214,12 @@ static int pageattr_test(void)
failed += print_split(&sc);
if (failed) {
- printk(KERN_ERR "NOT PASSED. Please report.\n");
+ printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
WARN_ON(1);
- return -EINVAL;
} else {
- if (print)
- printk(KERN_INFO "ok.\n");
+ printk(KERN_INFO "CPA selftests PASSED\n");
}
return 0;
}
-
-static int do_pageattr_test(void *__unused)
-{
- while (!kthread_should_stop()) {
- schedule_timeout_interruptible(HZ*30);
- if (pageattr_test() < 0)
- break;
- if (print)
- print--;
- }
- return 0;
-}
-
-static int start_pageattr_test(void)
-{
- struct task_struct *p;
-
- p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
- if (!IS_ERR(p))
- wake_up_process(p);
- else
- WARN_ON(1);
-
- return 0;
-}
-
-module_init(start_pageattr_test);
+module_init(exercise_pageattr);
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index 8493c855582b..16ce841f08d6 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -167,6 +167,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
pgprot_val(forbidden) |= _PAGE_NX;
+
+#ifdef CONFIG_DEBUG_RODATA
/* The .rodata section needs to be read-only */
if (within(address, (unsigned long)__start_rodata,
(unsigned long)__end_rodata))
@@ -177,6 +179,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
if (within(address, virt_to_highmap(__start_rodata),
virt_to_highmap(__end_rodata)))
pgprot_val(forbidden) |= _PAGE_RW;
+#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
@@ -257,6 +260,17 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot;
int level, do_split = 1;
+ /*
+ * An Athlon 64 X2 showed hard hangs if we tried to preserve
+ * largepages and changed the PSE entry from RW to RO.
+ *
+ * As AMD CPUs have a long series of errata in this area,
+ * (and none of the known ones seem to explain this hang),
+ * disable this code until the hang can be debugged:
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return 1;
+
spin_lock_irqsave(&pgd_lock, flags);
/*
* Check for races, another CPU might have split this page
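
static_protections() relies on a simple half-open range check to decide whether an address falls inside the .rodata section before forcing the read-only protection. A small standalone sketch, with made-up section bounds and a stand-in for _PAGE_RW:

    /*
     * Standalone sketch of the half-open range check; the section bounds
     * and the _PAGE_RW stand-in are invented for the example.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool within(unsigned long addr, unsigned long start, unsigned long end)
    {
        return addr >= start && addr < end;
    }

    int main(void)
    {
        unsigned long start_rodata = 0xc0400000UL;  /* invented bounds */
        unsigned long end_rodata   = 0xc0480000UL;
        unsigned long forbidden    = 0;

        if (within(0xc0410000UL, start_rodata, end_rodata))
            forbidden |= 0x2;   /* stand-in for _PAGE_RW */

        printf("forbidden mask: %#lx\n", forbidden);
        return 0;
    }
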
diff --git a/trunk/arch/x86/mm/srat_64.c b/trunk/arch/x86/mm/srat_64.c
index ecd91ea8a8ae..65416f843e59 100644
--- a/trunk/arch/x86/mm/srat_64.c
+++ b/trunk/arch/x86/mm/srat_64.c
@@ -488,8 +488,7 @@ void __init srat_reserve_add_area(int nodeid)
printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
"pre-allocated memory.\n", (unsigned long long)total_mb);
reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
- nodes_add[nodeid].end - nodes_add[nodeid].start,
- BOOTMEM_DEFAULT);
+ nodes_add[nodeid].end - nodes_add[nodeid].start);
}
}
diff --git a/trunk/arch/xtensa/kernel/time.c b/trunk/arch/xtensa/kernel/time.c
index 8df1e842f6d4..60d29fe0b1bd 100644
--- a/trunk/arch/xtensa/kernel/time.c
+++ b/trunk/arch/xtensa/kernel/time.c
@@ -204,7 +204,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __cpuinit calibrate_delay(void)
+void __devinit calibrate_delay(void)
{
loops_per_jiffy = CCOUNT_PER_JIFFY;
printk("Calibrating delay loop (skipped)... "
diff --git a/trunk/crypto/async_tx/async_memcpy.c b/trunk/crypto/async_tx/async_memcpy.c
index 0f6282207b32..047e533fcc5b 100644
--- a/trunk/crypto/async_tx/async_memcpy.c
+++ b/trunk/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
* @src: src page
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
* @depend_tx: memcpy depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,29 +46,33 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
- &dest, 1, &src, 1, len);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
struct dma_device *device = chan ? chan->device : NULL;
- struct dma_async_tx_descriptor *tx = NULL;
+ int int_en = cb_fn ? 1 : 0;
+ struct dma_async_tx_descriptor *tx = device ?
+ device->device_prep_dma_memcpy(chan, len,
+ int_en) : NULL;
- if (device) {
- dma_addr_t dma_dest, dma_src;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ if (tx) { /* run the memcpy asynchronously */
+ dma_addr_t addr;
+ enum dma_data_direction dir;
- dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
- DMA_FROM_DEVICE);
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
- dma_src = dma_map_page(device->dev, src, src_offset, len,
- DMA_TO_DEVICE);
+ dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
+ DMA_NONE : DMA_FROM_DEVICE;
- tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
- len, dma_prep_flags);
- }
+ addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
+ tx->tx_set_dest(addr, tx, 0);
+
+ dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
+ DMA_NONE : DMA_TO_DEVICE;
+
+ addr = dma_map_page(device->dev, src, src_offset, len, dir);
+ tx->tx_set_src(addr, tx, 0);
- if (tx) {
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
- } else {
+ } else { /* run the memcpy synchronously */
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
diff --git a/trunk/crypto/async_tx/async_memset.c b/trunk/crypto/async_tx/async_memset.c
index 09c0e83664bc..66ef6351202e 100644
--- a/trunk/crypto/async_tx/async_memset.c
+++ b/trunk/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: memset depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
- &dest, 1, NULL, 0, len);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
struct dma_device *device = chan ? chan->device : NULL;
- struct dma_async_tx_descriptor *tx = NULL;
+ int int_en = cb_fn ? 1 : 0;
+ struct dma_async_tx_descriptor *tx = device ?
+ device->device_prep_dma_memset(chan, val, len,
+ int_en) : NULL;
- if (device) {
- dma_addr_t dma_dest;
- unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ if (tx) { /* run the memset asynchronously */
+ dma_addr_t dma_addr;
+ enum dma_data_direction dir;
- dma_dest = dma_map_page(device->dev, dest, offset, len,
- DMA_FROM_DEVICE);
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+ dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
+ DMA_NONE : DMA_FROM_DEVICE;
- tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
- dma_prep_flags);
- }
+ dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
+ tx->tx_set_dest(dma_addr, tx, 0);
- if (tx) {
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else { /* run the memset synchronously */
void *dest_buf;
diff --git a/trunk/crypto/async_tx/async_tx.c b/trunk/crypto/async_tx/async_tx.c
index 562882189de5..bc18cbb8ea79 100644
--- a/trunk/crypto/async_tx/async_tx.c
+++ b/trunk/crypto/async_tx/async_tx.c
@@ -57,7 +57,8 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
*/
static spinlock_t async_tx_lock;
-static LIST_HEAD(async_tx_master_list);
+static struct list_head
+async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
@@ -361,13 +362,13 @@ static void __exit async_tx_exit(void)
}
/**
- * __async_tx_find_channel - find a channel to carry out the operation or let
+ * async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @depend_tx: transaction dependency
* @tx_type: transaction type
*/
struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
enum dma_transaction_type tx_type)
{
/* see if we can keep the chain on one channel */
@@ -383,7 +384,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
} else
return NULL;
}
-EXPORT_SYMBOL_GPL(__async_tx_find_channel);
+EXPORT_SYMBOL_GPL(async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
diff --git a/trunk/crypto/async_tx/async_xor.c b/trunk/crypto/async_tx/async_xor.c
index 2259a4ff15cb..2575f674dcd5 100644
--- a/trunk/crypto/async_tx/async_xor.c
+++ b/trunk/crypto/async_tx/async_xor.c
@@ -30,51 +30,35 @@
#include
#include