diff --git a/[refs] b/[refs] index 67bdccae4f46..9a6d8d9af0bc 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: aee85fe8e8143d3f54d9e6d3c6cdd40ead563267 +refs/heads/master: d4965b3e2ff94d0c7b7e6e7e9794b54950a2f4b9 diff --git a/trunk/CREDITS b/trunk/CREDITS index c6d69bf10e15..0bf31eac6dc2 100644 --- a/trunk/CREDITS +++ b/trunk/CREDITS @@ -1127,8 +1127,10 @@ S: Carnegie, Pennsylvania 15106-4304 S: USA N: Philip Gladstone -E: philip@raptor.com +E: philip@gladstonefamily.net D: Kernel / timekeeping stuff +S: Carlisle, MA 01741 +S: USA N: Jan-Benedict Glaw E: jbglaw@lug-owl.de @@ -3741,10 +3743,11 @@ D: Mylex DAC960 PCI RAID driver D: Miscellaneous kernel fixes N: Alessandro Zummo -E: azummo@ita.flashnet.it -W: http://freepage.logicom.it/azummo/ +E: a.zummo@towertech.it D: CMI8330 support in sb_card.c D: ISAPnP fixes in sb_card.c +D: ZyXEL omni.net lcd plus driver +D: RTC subsystem S: Italy N: Marc Zyngier diff --git a/trunk/Documentation/DMA-mapping.txt b/trunk/Documentation/DMA-mapping.txt index 684557474c15..ee4bb73683cd 100644 --- a/trunk/Documentation/DMA-mapping.txt +++ b/trunk/Documentation/DMA-mapping.txt @@ -199,6 +199,8 @@ address during PCI bus mastering you might do something like: "mydev: 24-bit DMA addressing not available.\n"); goto ignore_this_device; } +[Better use DMA_24BIT_MASK instead of 0x00ffffff. +See include/linux/dma-mapping.h for reference.] When pci_set_dma_mask() is successful, and returns zero, the PCI layer saves away this mask you have provided. The PCI layer will use this diff --git a/trunk/Documentation/RCU/whatisRCU.txt b/trunk/Documentation/RCU/whatisRCU.txt index b4ea51ad3610..07cb93b82ba9 100644 --- a/trunk/Documentation/RCU/whatisRCU.txt +++ b/trunk/Documentation/RCU/whatisRCU.txt @@ -605,7 +605,7 @@ are the same as those shown in the preceding section, so they are omitted. { int cpu; - for_each_cpu(cpu) + for_each_possible_cpu(cpu) run_on(cpu); } diff --git a/trunk/Documentation/block/biodoc.txt b/trunk/Documentation/block/biodoc.txt index 8e63831971d5..f989a9e839b4 100644 --- a/trunk/Documentation/block/biodoc.txt +++ b/trunk/Documentation/block/biodoc.txt @@ -132,8 +132,18 @@ Some new queue property settings: limit. No highmem default. blk_queue_max_sectors(q, max_sectors) - Maximum size request you can handle in units of 512 byte - sectors. 255 default. + Sets two variables that limit the size of the request. + + - The request queue's max_sectors, which is a soft size + in units of 512 byte sectors, and could be dynamically varied + by the core kernel. + + - The request queue's max_hw_sectors, which is a hard limit + and reflects the maximum size request a driver can handle + in units of 512 byte sectors. + + The default for both max_sectors and max_hw_sectors is + 255. The upper limit of max_sectors is 1024. blk_queue_max_phys_segments(q, max_segments) Maximum physical segments you can handle in a request. 128 diff --git a/trunk/Documentation/cachetlb.txt b/trunk/Documentation/cachetlb.txt index 4ae418889b88..53245c429f7d 100644 --- a/trunk/Documentation/cachetlb.txt +++ b/trunk/Documentation/cachetlb.txt @@ -362,6 +362,27 @@ maps this page at its virtual address. likely that you will need to flush the instruction cache for copy_to_user_page(). + void flush_anon_page(struct page *page, unsigned long vmaddr) + When the kernel needs to access the contents of an anonymous + page, it calls this function (currently only + get_user_pages()). Note: flush_dcache_page() deliberately + doesn't work for an anonymous page.
The default + implementation is a nop (and should remain so for all coherent + architectures). For incoherent architectures, it should flush + the cache of the page at vmaddr in the current user process. + + void flush_kernel_dcache_page(struct page *page) + When the kernel needs to modify a user page it has obtained + with kmap, it calls this function after all modifications are + complete (but before kunmapping it) to bring the underlying + page up to date. It is assumed here that the user has no + incoherent cached copies (i.e. the original page was obtained + from a mechanism like get_user_pages()). The default + implementation is a nop and should remain so on all coherent + architectures. On incoherent architectures, this should flush + the kernel cache for page (using page_address(page)). + + void flush_icache_range(unsigned long start, unsigned long end) When the kernel stores into addresses that it will execute out of (eg when loading modules), this function is called. diff --git a/trunk/Documentation/cpu-hotplug.txt b/trunk/Documentation/cpu-hotplug.txt index 57a09f99ecb0..1bcf69996c9d 100644 --- a/trunk/Documentation/cpu-hotplug.txt +++ b/trunk/Documentation/cpu-hotplug.txt @@ -97,13 +97,13 @@ at which time hotplug is disabled. You really don't need to manipulate any of the system cpu maps. They should be read-only for most use. When setting up per-cpu resources almost always use -cpu_possible_map/for_each_cpu() to iterate. +cpu_possible_map/for_each_possible_cpu() to iterate. Never use anything other than cpumask_t to represent bitmap of CPUs. #include <linux/cpumask.h> -for_each_cpu - Iterate over cpu_possible_map +for_each_possible_cpu - Iterate over cpu_possible_map for_each_online_cpu - Iterate over cpu_online_map for_each_present_cpu - Iterate over cpu_present_map for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. diff --git a/trunk/Documentation/cputopology.txt b/trunk/Documentation/cputopology.txt index ff280e2e1613..2b28e9ec4e3a 100644 --- a/trunk/Documentation/cputopology.txt +++ b/trunk/Documentation/cputopology.txt @@ -1,5 +1,5 @@ -Export cpu topology info by sysfs. Items (attributes) are similar +Export cpu topology info via sysfs. Items (attributes) are similar to /proc/cpuinfo. 1) /sys/devices/system/cpu/cpuX/topology/physical_package_id: @@ -12,7 +12,7 @@ represent the thread siblings to cpu X in the same core; represent the thread siblings to cpu X in the same physical package; To implement it in an architecture-neutral way, a new source file, -driver/base/topology.c, is to export the 5 attributes. +drivers/base/topology.c, is to export the 4 attributes. If one architecture wants to support this feature, it just needs to implement 4 defines, typically in file include/asm-XXX/topology.h. diff --git a/trunk/Documentation/drivers/edac/edac.txt b/trunk/Documentation/drivers/edac/edac.txt index d37191fe5681..70d96a62e5e1 100644 --- a/trunk/Documentation/drivers/edac/edac.txt +++ b/trunk/Documentation/drivers/edac/edac.txt @@ -21,7 +21,7 @@ within the computer system. In the initial release, memory Correctable Errors Detecting CE events, then harvesting those events and reporting them, CAN be a predictor of future UE events. With CE events, the system can -continue to operate, but with less safety. Preventive maintainence and +continue to operate, but with less safety. Preventive maintenance and proactive part replacement of memory DIMMs exhibiting CEs can reduce the likelihood of the dreaded UE events and system 'panics'.
@@ -29,13 +29,13 @@ the likelihood of the dreaded UE events and system 'panics'. In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices in order to determine if errors are occurring on data transfers. The presence of PCI Parity errors must be examined with a grain of salt. -There are several addin adapters that do NOT follow the PCI specification +There are several add-in adapters that do NOT follow the PCI specification with regards to Parity generation and reporting. The specification says the vendor should tie the parity status bits to 0 if they do not intend to generate parity. Some vendors do not do this, and thus the parity bit can "float" giving false positives. -The PCI Parity EDAC device has the ability to "skip" known flakey +The PCI Parity EDAC device has the ability to "skip" known flaky cards during the parity scan. These are set by the parity "blacklist" interface in the sysfs for PCI Parity. (See the PCI section in the sysfs section below.) There is also a parity "whitelist" which is used as @@ -101,7 +101,7 @@ Memory Controller (mc) Model First a background on the memory controller's model abstracted in EDAC. Each mc device controls a set of DIMM memory modules. These modules are -layed out in a Chip-Select Row (csrowX) and Channel table (chX). There can +laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can be multiple csrows and two channels. Memory controllers allow for several csrows, with 8 csrows being a typical value. @@ -131,7 +131,7 @@ for memory DIMMs: DIMM_B1 Labels for these slots are usually silk screened on the motherboard. Slots -labeled 'A' are channel 0 in this example. Slots labled 'B' +labeled 'A' are channel 0 in this example. Slots labeled 'B' are channel 1. Notice that there are two csrows possible on a physical DIMM. These csrows are allocated their csrow assignment based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM @@ -140,7 +140,7 @@ is placed in each Channel, the csrows cross both DIMMs. Memory DIMMs come single or dual "ranked". A rank is a populated csrow. Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above will have 1 csrow, csrow0. csrow1 will be empty. On the other hand, -when 2 dual ranked DIMMs are similiaryly placed, then both csrow0 and +when 2 dual ranked DIMMs are similarly placed, then both csrow0 and csrow1 will be populated. The pattern repeats itself for csrow2 and csrow3. @@ -246,7 +246,7 @@ Module Version read-only attribute file: 'mc_version' - The EDAC CORE modules's version and compile date are shown here to + The EDAC CORE module's version and compile date are shown here to indicate what EDAC is running. @@ -423,7 +423,7 @@ Total memory managed by this csrow attribute file: 'size_mb' This attribute file displays, in count of megabytes, the memory - that this csrow contatins. + that this csrow contains. Memory Type attribute file: @@ -557,7 +557,7 @@ On Header Type 00 devices the primary status is looked at for any parity error regardless of whether Parity is enabled on the device. (The spec indicates parity is generated in some cases). On Header Type 01 bridges, the secondary status register is also -looked at to see if parity ocurred on the bus on the other side of +looked at to see if parity occurred on the bus on the other side of the bridge.
@@ -588,7 +588,7 @@ Panic on PCI PARITY Error: 'panic_on_pci_parity' - This control files enables or disables panic'ing when a parity + This control file enables or disables panicking when a parity error has been detected. @@ -616,12 +616,12 @@ PCI Device Whitelist: This control file allows for an explicit list of PCI devices to be scanned for parity errors. Only devices found on this list will - be examined. The list is a line of hexadecimel VENDOR and DEVICE + be examined. The list is a line of hexadecimal VENDOR and DEVICE ID tuples: 1022:7450,1434:16a6 - One or more can be inserted, seperated by a comma. + One or more can be inserted, separated by a comma. To write the above list, do the following as one command line: @@ -639,11 +639,11 @@ PCI Device Blacklist: This control file allows for a list of PCI devices to be skipped for scanning. - The list is a line of hexadecimel VENDOR and DEVICE ID tuples: + The list is a line of hexadecimal VENDOR and DEVICE ID tuples: 1022:7450,1434:16a6 - One or more can be inserted, seperated by a comma. + One or more can be inserted, separated by a comma. To write the above list, do the following as one command line: @@ -651,14 +651,14 @@ PCI Device Blacklist: > /sys/devices/system/edac/pci/pci_parity_blacklist - To display what the whitelist current contatins, + To display what the whitelist currently contains, simply 'cat' the same file. ======================================================================= PCI Vendor and Device IDs can be obtained with the lspci command. Using the -n option, lspci will display the vendor and device IDs. The system -adminstrator will have to determine which devices should be scanned or +administrator will have to determine which devices should be scanned or skipped. @@ -669,5 +669,5 @@ Turn OFF a whitelist by an empty echo command: echo > /sys/devices/system/edac/pci/pci_parity_whitelist -and any previous blacklist will be utililzed. +and any previous blacklist will be utilized. diff --git a/trunk/Documentation/filesystems/00-INDEX b/trunk/Documentation/filesystems/00-INDEX index 74052d22d868..66fdc0744fe0 100644 --- a/trunk/Documentation/filesystems/00-INDEX +++ b/trunk/Documentation/filesystems/00-INDEX @@ -1,27 +1,47 @@ 00-INDEX - this file (info on some of the filesystems supported by linux). +Exporting + - explanation of how to make filesystems exportable. Locking - info on locking rules as they pertain to Linux VFS. adfs.txt - info and mount options for the Acorn Advanced Disc Filing System. +afs.txt + - info and examples for the distributed AFS (Andrew File System) fs. affs.txt - info and mount options for the Amiga Fast File System. +automount-support.txt + - information about filesystem automount support. +befs.txt + - information about the BeOS filesystem for Linux. bfs.txt - info for the SCO UnixWare Boot Filesystem (BFS). cifs.txt - - description of the CIFS filesystem + - description of the CIFS filesystem. coda.txt - description of the CODA filesystem. configfs/ - directory containing configfs documentation and example code. cramfs.txt - - info on the cram filesystem for small storage (ROMs etc) + - info on the cram filesystem for small storage (ROMs etc). +dentry-locking.txt + - info on the RCU-based dcache locking model. devfs/ - directory containing devfs documentation. +directory-locking + - info about the locking scheme used for directory operations. dlmfs.txt - info on the userspace interface to the OCFS2 DLM. ext2.txt - info, mount options and specifications for the Ext2 filesystem.
+ext3.txt + - info, mount options and specifications for the Ext3 filesystem. +files.txt + - info on file management in the Linux kernel. +fuse.txt + - info on the Filesystem in User SpacE including mount options. +hfs.txt + - info on the Macintosh HFS Filesystem for Linux. hpfs.txt - info and mount options for the OS/2 HPFS. isofs.txt @@ -32,23 +52,43 @@ ncpfs.txt - info on Novell Netware(tm) filesystem using NCP protocol. ntfs.txt - info and mount options for the NTFS filesystem (Windows NT). -proc.txt - - info on Linux's /proc filesystem. ocfs2.txt - info and mount options for the OCFS2 clustered filesystem. +porting + - various information on filesystem porting. +proc.txt + - info on Linux's /proc filesystem. +ramfs-rootfs-initramfs.txt + - info on the 'in memory' filesystems ramfs, rootfs and initramfs. +reiser4.txt + - info on the Reiser4 filesystem based on dancing tree algorithms. +relayfs.txt + - info on relayfs, for efficient streaming from kernel to user space. romfs.txt - - Description of the ROMFS filesystem. + - description of the ROMFS filesystem. smbfs.txt - - info on using filesystems with the SMB protocol (Windows 3.11 and NT) + - info on using filesystems with the SMB protocol (Win 3.11 and NT). +spufs.txt + - info and mount options for the SPU filesystem used on Cell. +sysfs-pci.txt + - info on accessing PCI device resources through sysfs. +sysfs.txt + - info on sysfs, a ram-based filesystem for exporting kernel objects. sysv-fs.txt - info on the SystemV/V7/Xenix/Coherent filesystem. +tmpfs.txt + - info on tmpfs, a filesystem that holds all files in virtual memory. udf.txt - info and mount options for the UDF filesystem. ufs.txt - info on the ufs filesystem. +v9fs.txt + - v9fs is a Unix implementation of the Plan 9 9p remote fs protocol. vfat.txt - info on using the VFAT filesystem used in Windows NT and Windows 95 vfs.txt - - Overview of the Virtual File System + - overview of the Virtual File System xfs.txt - info and mount options for the XFS filesystem. +xip.txt + - info on execute-in-place for file mappings. diff --git a/trunk/Documentation/ioctl-number.txt b/trunk/Documentation/ioctl-number.txt index aa7ba00ec082..171a44ebd939 100644 --- a/trunk/Documentation/ioctl-number.txt +++ b/trunk/Documentation/ioctl-number.txt @@ -78,8 +78,6 @@ Code Seq# Include File Comments '#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem '1' 00-1F PPS kit from Ulrich Windl -'6' 00-10 Intel IA32 microcode update driver - '8' all SNP8023 advanced NIC card 'A' 00-1F linux/apm_bios.h diff --git a/trunk/Documentation/m68k/README.buddha b/trunk/Documentation/m68k/README.buddha index bf802ffc98ad..ef484a719bb9 100644 --- a/trunk/Documentation/m68k/README.buddha +++ b/trunk/Documentation/m68k/README.buddha @@ -29,7 +29,7 @@ address is written to $4a, then the whole Byte is written to $48, while it doesn't matter how often you're writing to $4a as long as $48 is not touched. After $48 has been written, the whole card disappears from $e8 and is mapped to the new -address just written. Make shure $4a is written before $48, +address just written. Make sure $4a is written before $48, otherwise your chance is only 1:16 to find the board :-). 
The local memory-map is even active when mapped to $e8: diff --git a/trunk/Documentation/networking/ifenslave.c b/trunk/Documentation/networking/ifenslave.c index 545447ac503a..a12059886755 100644 --- a/trunk/Documentation/networking/ifenslave.c +++ b/trunk/Documentation/networking/ifenslave.c @@ -87,7 +87,7 @@ * would fail and generate an error message in the system log. * - For opt_c: slave should not be set to the master's setting * while it is running. It was already set during enslave. To - * simplify things, it is now handeled separately. + * simplify things, it is now handled separately. * * - 2003/12/01 - Shmulik Hen * - Code cleanup and style changes diff --git a/trunk/Documentation/networking/vortex.txt b/trunk/Documentation/networking/vortex.txt index 3759acf95b29..6091e5f6794f 100644 --- a/trunk/Documentation/networking/vortex.txt +++ b/trunk/Documentation/networking/vortex.txt @@ -24,36 +24,44 @@ Since kernel 2.3.99-pre6, this driver incorporates the support for the This driver supports the following hardware: - 3c590 Vortex 10Mbps - 3c592 EISA 10mbps Demon/Vortex - 3c597 EISA Fast Demon/Vortex - 3c595 Vortex 100baseTx - 3c595 Vortex 100baseT4 - 3c595 Vortex 100base-MII - 3Com Vortex - 3c900 Boomerang 10baseT - 3c900 Boomerang 10Mbps Combo - 3c900 Cyclone 10Mbps TPO - 3c900B Cyclone 10Mbps T - 3c900 Cyclone 10Mbps Combo - 3c900 Cyclone 10Mbps TPC - 3c900B-FL Cyclone 10base-FL - 3c905 Boomerang 100baseTx - 3c905 Boomerang 100baseT4 - 3c905B Cyclone 100baseTx - 3c905B Cyclone 10/100/BNC - 3c905B-FX Cyclone 100baseFx - 3c905C Tornado - 3c980 Cyclone - 3cSOHO100-TX Hurricane - 3c555 Laptop Hurricane - 3c575 Boomerang CardBus - 3CCFE575 Cyclone CardBus - 3CCFE575CT Cyclone CardBus - 3CCFE656 Cyclone CardBus - 3CCFEM656 Cyclone CardBus - 3c450 Cyclone/unknown - + 3c590 Vortex 10Mbps + 3c592 EISA 10Mbps Demon/Vortex + 3c597 EISA Fast Demon/Vortex + 3c595 Vortex 100baseTx + 3c595 Vortex 100baseT4 + 3c595 Vortex 100base-MII + 3c900 Boomerang 10baseT + 3c900 Boomerang 10Mbps Combo + 3c900 Cyclone 10Mbps TPO + 3c900 Cyclone 10Mbps Combo + 3c900 Cyclone 10Mbps TPC + 3c900B-FL Cyclone 10base-FL + 3c905 Boomerang 100baseTx + 3c905 Boomerang 100baseT4 + 3c905B Cyclone 100baseTx + 3c905B Cyclone 10/100/BNC + 3c905B-FX Cyclone 100baseFx + 3c905C Tornado + 3c920B-EMB-WNM (ATI Radeon 9100 IGP) + 3c980 Cyclone + 3c980C Python-T + 3cSOHO100-TX Hurricane + 3c555 Laptop Hurricane + 3c556 Laptop Tornado + 3c556B Laptop Hurricane + 3c575 [Megahertz] 10/100 LAN CardBus + 3c575 Boomerang CardBus + 3CCFE575BT Cyclone CardBus + 3CCFE575CT Tornado CardBus + 3CCFE656 Cyclone CardBus + 3CCFEM656B Cyclone+Winmodem CardBus + 3CXFEM656C Tornado+Winmodem CardBus + 3c450 HomePNA Tornado + 3c920 Tornado + 3c982 Hydra Dual Port A + 3c982 Hydra Dual Port B + 3c905B-T4 + 3c920B-EMB-WNM Tornado Module parameters ================= @@ -293,11 +301,6 @@ Donald's wake-on-LAN page: http://www.scyld.com/wakeonlan.html -3Com's documentation for many NICs, including the ones supported by -this driver is available at - - http://support.3com.com/partners/developer/developer_form.html - 3Com's DOS-based application for setting up the NICs EEPROMs: ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe @@ -312,10 +315,10 @@ Autonegotiation notes --------------------- The driver uses a one-minute heartbeat for adapting to changes in - the external LAN environment. 
This means that when, for example, a - machine is unplugged from a hubbed 10baseT LAN plugged into a - switched 100baseT LAN, the throughput will be quite dreadful for up - to sixty seconds. Be patient. + the external LAN environment if the link is up, and a five-second heartbeat if it is down. + This means that when, for example, a machine is unplugged from a hubbed + 10baseT LAN and plugged into a switched 100baseT LAN, the throughput + will be quite dreadful for up to sixty seconds. Be patient. Cisco interoperability note from Walter Wong : diff --git a/trunk/Documentation/pnp.txt b/trunk/Documentation/pnp.txt index af0f6eabfa1c..9529c9c9fd59 100644 --- a/trunk/Documentation/pnp.txt +++ b/trunk/Documentation/pnp.txt @@ -115,6 +115,9 @@ pnp_unregister_protocol pnp_register_driver - adds a PnP driver to the Plug and Play Layer - this includes driver model integration +- returns zero for success or a negative error number for failure; count + calls to the .add() method if you need to know how many devices bind to + the driver pnp_unregister_driver - removes a PnP driver from the Plug and Play Layer diff --git a/trunk/Documentation/robust-futex-ABI.txt b/trunk/Documentation/robust-futex-ABI.txt new file mode 100644 index 000000000000..8529a17ffaa1 --- /dev/null +++ b/trunk/Documentation/robust-futex-ABI.txt @@ -0,0 +1,182 @@ +Started by Paul Jackson + +The robust futex ABI +-------------------- + +Robust_futexes provide a mechanism that is used in addition to normal +futexes, for kernel assist of cleanup of held locks on task exit. + +The interesting data as to what futexes a thread is holding is kept on a +linked list in user space, where it can be updated efficiently as locks +are taken and dropped, without kernel intervention. The only additional +kernel intervention required for robust_futexes above and beyond what is +required for futexes is: + + 1) a one time call, per thread, to tell the kernel where its list of + held robust_futexes begins, and + 2) internal kernel code at exit, to handle any listed locks held + by the exiting thread. + +The existing normal futexes already provide a "Fast Userspace Locking" +mechanism, which handles uncontested locking without needing a system +call, and handles contested locking by maintaining a list of waiting +threads in the kernel. Options on the sys_futex(2) system call support +waiting on a particular futex, and waking up the next waiter on a +particular futex. + +For robust_futexes to work, the user code (typically in a library such +as glibc linked with the application) has to manage and place the +necessary list elements exactly as the kernel expects them. If it fails +to do so, then improperly listed locks will not be cleaned up on exit, +probably causing deadlock or other such failure of the other threads +waiting on the same locks. + +A thread that anticipates possibly using robust_futexes should first +issue the system call: + + asmlinkage long + sys_set_robust_list(struct robust_list_head __user *head, size_t len); + +The pointer 'head' points to a structure in the thread's address space +consisting of three words. Each word is 32 bits on 32 bit arch's, or 64 +bits on 64 bit arch's, and in local byte order. Each thread should have +its own thread private 'head'. + +If a thread is running in 32 bit compatibility mode on a 64 bit native arch +kernel, then it can actually have two such structures - one using 32 bit +words for 32 bit compatibility mode, and one using 64 bit words for 64 +bit native mode.
The kernel, if it is a 64 bit kernel supporting 32 bit +compatibility mode, will attempt to process both lists on each task +exit, if the corresponding sys_set_robust_list() call has been made to +set up that list. + + The first word in the memory structure at 'head' contains a + pointer to a singly linked list of 'lock entries', one per lock, + as described below. If the list is empty, the pointer will point + to itself, 'head'. The last 'lock entry' points back to the 'head'. + + The second word, called 'offset', specifies the offset from the + address of the associated 'lock entry', plus or minus, of what will + be called the 'lock word'. The 'lock word' + is always a 32 bit word, unlike the other words above. The 'lock + word' holds 3 flag bits in the upper 3 bits, and the thread id (TID) + of the thread holding the lock in the bottom 29 bits. See further + below for a description of the flag bits. + + The third word, called 'list_op_pending', contains a transient copy of + the address of the 'lock entry', during list insertion and removal, + and is needed to correctly resolve races should a thread exit while + in the middle of a locking or unlocking operation. + +Each 'lock entry' on the singly linked list starting at 'head' consists +of just a single word, pointing to the next 'lock entry', or back to +'head' if there are no more entries. In addition, nearby to each 'lock +entry', at an offset from the 'lock entry' specified by the 'offset' +word, is one 'lock word'. + +The 'lock word' is always 32 bits, and is intended to be the same 32 bit +lock variable used by the futex mechanism, in conjunction with +robust_futexes. The kernel will only be able to wake up the next thread +waiting for a lock on a thread's exit if that next thread used the futex +mechanism to register the address of that 'lock word' with the kernel. + +For each futex lock currently held by a thread, if it wants this +robust_futex support for exit cleanup of that lock, it should have one +'lock entry' on this list, with its associated 'lock word' at the +specified 'offset'. Should a thread die while holding any such locks, +the kernel will walk this list, mark any such locks with a bit +indicating their holder died, and wake up the next thread waiting for +that lock using the futex mechanism. + +When a thread has invoked the above system call to indicate it +anticipates using robust_futexes, the kernel stores the passed in 'head' +pointer for that task. The task may retrieve that value later on by +using the system call: + + asmlinkage long + sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, + size_t __user *len_ptr); + +It is anticipated that threads will use robust_futexes embedded in +larger, user level locking structures, one per lock. The kernel +robust_futex mechanism doesn't care what else is in that structure, so +long as the 'offset' to the 'lock word' is the same for all +robust_futexes used by that thread. The thread should link those locks +it currently holds using the 'lock entry' pointers. It may also have +other links between the locks, such as the reverse side of a doubly +linked list, but that doesn't matter to the kernel. + +By keeping its locks linked this way, on a list starting with a 'head' +pointer known to the kernel, the kernel can provide to a thread the +essential service available for robust_futexes, which is to help clean +up locks held at the time of (a perhaps unexpected) exit.
+ +Actual locking and unlocking, during normal operations, is handled +entirely by user level code in the contending threads, and by the +existing futex mechanism to wait for, and wake up, locks. The kernel's +only essential involvement in robust_futexes is to remember where the +list 'head' is, and to walk the list on thread exit, handling locks +still held by the departing thread, as described below. + +There may exist thousands of futex lock structures in a thread's shared +memory, on various data structures, at a given point in time. Only those +lock structures for locks currently held by that thread should be on +that thread's robust_futex linked lock list at a given time. + +A given futex lock structure in a user shared memory region may be held +at different times by any of the threads with access to that region. The +thread currently holding such a lock, if any, is marked with the thread's +TID in the lower 29 bits of the 'lock word'. + +When adding or removing a lock from its list of held locks, in order for +the kernel to correctly handle lock cleanup regardless of when the task +exits (perhaps it gets an unexpected signal 9 in the middle of +manipulating this list), the user code must observe the following +protocol on 'lock entry' insertion and removal: + +On insertion: + 1) set the 'list_op_pending' word to the address of the 'lock word' + to be inserted, + 2) acquire the futex lock, + 3) add the lock entry, with its thread id (TID) in the bottom 29 bits + of the 'lock word', to the linked list starting at 'head', and + 4) clear the 'list_op_pending' word. + +On removal: + 1) set the 'list_op_pending' word to the address of the 'lock word' + to be removed, + 2) remove the lock entry for this lock from the 'head' list, + 3) release the futex lock, and + 4) clear the 'list_op_pending' word. + +On exit, the kernel will consider the address stored in +'list_op_pending' and the address of each 'lock word' found by walking +the list starting at 'head'. For each such address, if the bottom 29 +bits of the 'lock word' at offset 'offset' from that address equal the +exiting thread's TID, then the kernel will do two things: + + 1) if bit 31 (0x80000000) is set in that word, then attempt a futex + wakeup on that address, which will wake the next thread that has + used the futex mechanism to wait on that address, and + 2) atomically set bit 30 (0x40000000) in the 'lock word'. + +In the above, bit 31 was set by futex waiters on that lock to indicate +they were waiting, and bit 30 is set by the kernel to indicate that the +lock owner died holding the lock. + +The kernel exit code will silently stop scanning the list further if at +any point: + + 1) the 'head' pointer or a subsequent linked list pointer + is not a valid address of a user space word + 2) the calculated location of the 'lock word' (address plus + 'offset') is not the valid address of a 32 bit user space + word + 3) the list contains more than 1 million (subject to + future kernel configuration changes) elements. + +When the kernel sees a list entry whose 'lock word' doesn't have the +current thread's TID in the lower 29 bits, it does nothing with that +entry, and goes on to the next entry. + +Bit 29 (0x20000000) of the 'lock word' is reserved for future use.
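
[To make the registration step above concrete, here is a minimal
user-space sketch. struct robust_list_head comes from linux/futex.h;
the 'my_lock' layout, the demo function, and the negative offset are
only illustrative assumptions, not part of the patch. Note that glibc
normally performs this registration itself, once per thread.]

	#include <linux/futex.h>	/* struct robust_list_head */
	#include <sys/syscall.h>	/* SYS_set_robust_list */
	#include <stddef.h>		/* offsetof, NULL */
	#include <unistd.h>		/* syscall */

	/* A hypothetical lock: the 32 bit 'lock word' plus its 'lock entry'. */
	struct my_lock {
		unsigned int lock_word;		/* TID in bits 0-28, flags in bits 29-31 */
		struct robust_list list;	/* the 'lock entry' linked from 'head' */
	};

	static struct robust_list_head head;

	static long register_robust_list(void)
	{
		head.list.next = &head.list;	/* empty list points back to 'head' */
		/* the 'offset' from a 'lock entry' to its 'lock word' (negative here) */
		head.futex_offset = (long)offsetof(struct my_lock, lock_word)
				  - (long)offsetof(struct my_lock, list);
		head.list_op_pending = NULL;	/* no insert/remove in progress */
		return syscall(SYS_set_robust_list, &head, sizeof(head));
	}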
diff --git a/trunk/Documentation/robust-futexes.txt b/trunk/Documentation/robust-futexes.txt new file mode 100644 index 000000000000..df82d75245a0 --- /dev/null +++ b/trunk/Documentation/robust-futexes.txt @@ -0,0 +1,218 @@ +Started by: Ingo Molnar + +Background +---------- + +What are robust futexes? To answer that, we first need to understand +what futexes are: normal futexes are special types of locks that in the +noncontended case can be acquired/released from userspace without having +to enter the kernel. + +A futex is in essence a user-space address, e.g. a 32-bit lock variable +field. If userspace notices contention (the lock is already owned and +someone else wants to grab it too) then the lock is marked with a value +that says "there's a waiter pending", and the sys_futex(FUTEX_WAIT) +syscall is used to wait for the other guy to release it. The kernel +creates a 'futex queue' internally, so that it can later on match up the +waiter with the waker - without them having to know about each other. +When the owner thread releases the futex, it notices (via the variable +value) that there were waiter(s) pending, and does the +sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have +taken and released the lock, the futex is again back to 'uncontended' +state, and there's no in-kernel state associated with it. The kernel +completely forgets that there ever was a futex at that address. This +method makes futexes very lightweight and scalable. + +"Robustness" is about dealing with crashes while holding a lock: if a +process exits prematurely while holding a pthread_mutex_t lock that is +also shared with some other process (e.g. yum segfaults while holding a +pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need +to be notified that the last owner of the lock exited in some irregular +way. + +To solve such types of problems, "robust mutex" userspace APIs were +created: pthread_mutex_lock() returns an error value if the owner exits +prematurely - and the new owner can decide whether the data protected by +the lock can be recovered safely. + +There is a big conceptual problem with futex based mutexes though: it is +the kernel that destroys the owner task (e.g. due to a SEGFAULT), but +the kernel cannot help with the cleanup: if there is no 'futex queue' +(and in most cases there is none, futexes being fast lightweight locks) +then the kernel has no information to clean up after the held lock! +Userspace has no chance to clean up after the lock either - userspace is +the one that crashes, so it has no opportunity to clean up. Catch-22. + +In practice, when e.g. yum is kill -9-ed (or segfaults), a system reboot +is needed to release that futex based lock. This is one of the leading +bug reports against yum. + +To solve this problem, the traditional approach was to extend the vma +(virtual memory area descriptor) concept to have a notion of 'pending +robust futexes attached to this area'. This approach requires 3 new +syscall variants to sys_futex(): FUTEX_REGISTER, FUTEX_DEREGISTER and +FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether +they have a robust_head set. This approach has two fundamental problems +left: + + - it has quite complex locking and race scenarios. The vma-based + approach had been pending for years, but it is still not completely + reliable. + + - they have to scan _every_ vma at sys_exit() time, per thread!
+ +The second disadvantage is a real killer: pthread_exit() takes around 1 +microsecond on Linux, but with thousands (or tens of thousands) of vmas +every pthread_exit() takes a millisecond or more, also totally +destroying the CPU's L1 and L2 caches! + +This is very noticeable even for normal process sys_exit_group() +calls: the kernel has to do the vma scanning unconditionally! (this is +because the kernel has no knowledge about how many robust futexes there +are to be cleaned up, because a robust futex might have been registered +in another task, and the futex variable might have been simply mmap()-ed +into this process's address space). + +This huge overhead forced the creation of CONFIG_FUTEX_ROBUST so that +normal kernels can turn it off, but worse than that: the overhead makes +robust futexes impractical for any type of generic Linux distribution. + +So something had to be done. + +New approach to robust futexes +------------------------------ + +At the heart of this new approach there is a per-thread private list of +robust locks that userspace is holding (maintained by glibc) - which +userspace list is registered with the kernel via a new syscall [this +registration happens at most once per thread lifetime]. At do_exit() +time, the kernel checks this user-space list: are there any robust futex +locks to be cleaned up? + +In the common case, at do_exit() time, there is no list registered, so +the cost of robust futexes is just a simple current->robust_list != NULL +comparison. If the thread has registered a list, then normally the list +is empty. If the thread/process crashed or terminated in some incorrect +way then the list might be non-empty: in this case the kernel carefully +walks the list [not trusting it], and marks all locks that are owned by +this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if +any). + +The list is guaranteed to be private and per-thread at do_exit() time, +so it can be accessed by the kernel in a lockless way. + +There is one race possible though: since adding to and removing from the +list is done after the futex is acquired by glibc, there is a window of a few +instructions in which the thread (or process) could die, leaving +the futex hung. To protect against this possibility, userspace (glibc) +also maintains a simple per-thread 'list_op_pending' field, to allow the +kernel to clean up if the thread dies after acquiring the lock, but just +before it could have added itself to the list. Glibc sets this +list_op_pending field before it tries to acquire the futex, and clears +it after the list-add (or list-remove) has finished. + +That's all that is needed - all the rest of robust-futex cleanup is done +in userspace [just like with the previous patches]. + +Ulrich Drepper has implemented the necessary glibc support for this new +mechanism, which fully enables robust mutexes. + +Key differences of this userspace-list based approach, compared to the +vma based method: + + - it's much, much faster: at thread exit time, there's no need to loop + over every vma (!), which the VM-based method has to do. Only a very + simple 'is the list empty' op is done. + + - no VM changes are needed - 'struct address_space' is left alone. + + - no registration of individual locks is needed: robust mutexes don't + need any extra per-lock syscalls. Robust mutexes thus become a very + lightweight primitive - so they don't force the application designer + to make a hard choice between performance and robustness - robust + mutexes are just as fast.
+ + - no per-lock kernel allocation happens. + + - no resource limits are needed. + + - no kernel-space recovery call (FUTEX_RECOVER) is needed. + + - the implementation and the locking is "obvious", and there are no + interactions with the VM. + +Performance +----------- + +I have benchmarked the time needed for the kernel to process a list of 1 +million (!) held locks, using the new method [on a 2GHz CPU]: + + - with FUTEX_WAIT set [contended mutex]: 130 msecs + - without FUTEX_WAIT set [uncontended mutex]: 30 msecs + +I have also measured an approach where glibc does the lock notification +[which it currently does for !pshared robust mutexes], and that took 256 +msecs - clearly slower, due to the 1 million FUTEX_WAKE syscalls +userspace had to do. + +(1 million held locks are unheard of - we expect at most a handful of +locks to be held at a time. Nevertheless it's nice to know that this +approach scales nicely.) + +Implementation details +---------------------- + +The patch adds two new syscalls: one to register the userspace list, and +one to query the registered list pointer: + + asmlinkage long + sys_set_robust_list(struct robust_list_head __user *head, + size_t len); + + asmlinkage long + sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, + size_t __user *len_ptr); + +List registration is very fast: the pointer is simply stored in +current->robust_list. [Note that in the future, if robust futexes become +widespread, we could extend sys_clone() to register a robust-list head +for new threads, without the need of another syscall.] + +So there is virtually zero overhead for tasks not using robust futexes, +and even for robust futex users, there is only one extra syscall per +thread lifetime, and the cleanup operation, if it happens, is fast and +straightforward. The kernel doesn't have any internal distinction between +robust and normal futexes. + +If a futex is found to be held at exit time, the kernel sets the +following bit of the futex word: + + #define FUTEX_OWNER_DIED 0x40000000 + +and wakes up the next futex waiter (if any). User-space does the rest of +the cleanup. + +Otherwise, robust futexes are acquired by glibc by putting the TID into +the futex field atomically. Waiters set the FUTEX_WAITERS bit: + + #define FUTEX_WAITERS 0x80000000 + +and the remaining bits are for the TID. + +Testing, architecture support +----------------------------- + +I've tested the new syscalls on x86 and x86_64, and have made sure the +parsing of the userspace list is robust [ ;-) ] even if the list is +deliberately corrupted. + +i386 and x86_64 syscalls are wired up at the moment, and Ulrich has +tested the new glibc code (on x86_64 and i386), and it works for his +robust-mutex testcases. + +All other architectures should build just fine too - but they won't have +the new syscalls yet. + +Architectures need to implement the new futex_atomic_cmpxchg_inatomic() +inline function before wiring up the syscalls (that function returns +-ENOSYS right now). diff --git a/trunk/Documentation/rpc-cache.txt b/trunk/Documentation/rpc-cache.txt index 2b5d4434fa5a..5f757c8cf979 100644 --- a/trunk/Documentation/rpc-cache.txt +++ b/trunk/Documentation/rpc-cache.txt @@ -1,4 +1,4 @@ -This document gives a brief introduction to the caching + This document gives a brief introduction to the caching mechanisms in the sunrpc layer that is used, in particular, for NFS authentication.
@@ -25,25 +25,17 @@ The common code handles such things as: - supporting 'NEGATIVE' as well as positive entries - allowing an EXPIRED time on cache items, and removing items after they expire, and are no longer in use. - - Future code extensions are expect to handle - making requests to user-space to fill in cache entries - allowing user-space to directly set entries in the cache - delaying RPC requests that depend on as-yet incomplete cache entries, and replaying those requests when the cache entry is complete. - - maintaining last-access times on cache entries - - clean out old entries when the caches become full - -The code for performing a cache lookup is also common, but in the form -of a template. i.e. a #define. -Each cache defines a lookup function by using the DefineCacheLookup -macro, or the simpler DefineSimpleCacheLookup macro + - clean out old entries as they expire. Creating a Cache ---------------- -1/ A cache needs a datum to cache. This is in the form of a +1/ A cache needs a datum to store. This is in the form of a structure definition that must contain a struct cache_head as an element, usually the first. @@ -51,35 +43,69 @@ Creating a Cache Each cache element is reference counted and contains expiry and update times for use in cache management. 2/ A cache needs a "cache_detail" structure that - describes the cache. This stores the hash table, and some - parameters for cache management. -3/ A cache needs a lookup function. This is created using - the DefineCacheLookup macro. This lookup function is used both - to find entries and to update entries. The normal mode for - updating an entry is to replace the old entry with a new - entry. However it is possible to allow update-in-place - for those caches where it makes sense (no atomicity issues - or indirect reference counting issue) -4/ A cache needs to be registered using cache_register(). This - includes in on a list of caches that will be regularly - cleaned to discard old data. For this to work, some - thread must periodically call cache_clean - + describes the cache. This stores the hash table, some + parameters for cache management, and some operations detailing how + to work with particular cache items. + The operations required are: + struct cache_head *alloc(void) + This simply allocates appropriate memory and returns + a pointer to the cache_head embedded within the + structure + void cache_put(struct kref *) + This is called when the last reference to an item + is dropped. The pointer passed is to the 'ref' field + in the cache_head. cache_put should release any + references created by 'cache_init' and, if CACHE_VALID + is set, any references created by cache_update. + It should then release the memory allocated by + 'alloc'. + int match(struct cache_head *orig, struct cache_head *new) + Test if the keys in the two structures match. Return + 1 if they do, 0 if they don't. + void init(struct cache_head *orig, struct cache_head *new) + Set the 'key' fields in 'new' from 'orig'. This may + include taking references to shared objects. + void update(struct cache_head *orig, struct cache_head *new) + Set the 'content' fields in 'new' from 'orig'. + int cache_show(struct seq_file *m, struct cache_detail *cd, + struct cache_head *h) + Optional. Used to provide a /proc file that lists the + contents of a cache. This should show one item, + usually on just one line.
+ int cache_request(struct cache_detail *cd, struct cache_head *h, + char **bpp, int *blen) + Format a request to be sent to user-space for an item + to be instantiated. *bpp is a buffer of size *blen. + *bpp should be moved forward over the encoded message, + and *blen should be reduced to show how much free + space remains. Return 0 on success or <0 if not + enough room or other problem. + int cache_parse(struct cache_detail *cd, char *buf, int len) + A message from user space has arrived to fill out a + cache entry. It is in 'buf' of length 'len'. + cache_parse should parse this, find the item in the + cache with sunrpc_cache_lookup, and update the item + with sunrpc_cache_update. + + +3/ A cache needs to be registered using cache_register(). This + includes it on a list of caches that will be regularly + cleaned to discard old data. + Using a cache ------------- -To find a value in a cache, call the lookup function passing it a the -datum which contains key, and possibly content, and a flag saying -whether to update the cache with new data from the datum. Depending -on how the cache lookup function was defined, it may take an extra -argument to identify the particular cache in question. +To find a value in a cache, call sunrpc_cache_lookup passing a pointer +to the cache_head in a sample item with the 'key' fields filled in. +This will be passed to ->match to identify the target entry. If no +entry is found, a new entry will be created, added to the cache, and +marked as not containing valid data. -Except in cases of kmalloc failure, the lookup function -will return a new datum which will store the key and -may contain valid content, or may not. -This datum is typically passed to cache_check which determines the -validity of the datum and may later initiate an upcall to fill -in the data. +The item returned is typically passed to cache_check which will check +if the data is valid, and may initiate an up-call to get fresh data. +cache_check will return -ENOENT if the entry is negative or if an up +call is needed but not possible, -EAGAIN if an upcall is pending, +or 0 if the data is valid; cache_check can be passed a "struct cache_req *". This structure is typically embedded in the actual request and can be used to create a @@ -90,6 +116,13 @@ item does become valid, the deferred copy of the request will be revisited (->revisit). It is expected that this method will reschedule the request for processing. +The value returned by sunrpc_cache_lookup can also be passed to +sunrpc_cache_update to set the content for the item. A second item is +passed which should hold the content. If the item found by _lookup +has valid data, then it is discarded and a new item is created. This +saves any user of an item from worrying about content changing while +it is being inspected. If the item found by _lookup does not contain +valid data, then the content is copied across and CACHE_VALID is set. Populating a cache ------------------ @@ -114,8 +147,8 @@ should be create or updated to have the given content, and the expiry time should be set on that item. Reading from a channel is a bit more interesting. When a cache -lookup fail, or when it suceeds but finds an entry that may soon -expiry, a request is lodged for that cache item to be updated by +lookup fails, or when it succeeds but finds an entry that may soon +expire, a request is lodged for that cache item to be updated by user-space. These requests appear in the channel file. Successive reads will return successive requests.
@@ -130,7 +163,7 @@ Thus a user-space helper is likely to: write a response loop. -If it dies and needs to be restarted, any requests that have not be +If it dies and needs to be restarted, any requests that have not been answered will still appear in the file and will be read by the new instance of the helper. @@ -142,10 +175,9 @@ Each cache should also define a "cache_request" method which takes a cache item and encodes a request into the buffer provided. - Note: If a cache has no active readers on the channel, and has had no active readers for more than 60 seconds, further requests will not be -added to the channel but instead all looks that do not find a valid +added to the channel but instead all lookups that do not find a valid entry will fail. This is partly for backward compatibility: The previous nfs exports table was deemed to be authoritative and a failed lookup meant a definite 'no'. @@ -154,18 +186,17 @@ request/response format ----------------------- While each cache is free to use its own format for requests -and responses over channel, the following is recommended are +and responses over channel, the following is recommended as appropriate and support routines are available to help: Each request or response record should be printable ASCII with precisely one newline character which should be at the end. Fields within the record should be separated by spaces, normally one. If spaces, newlines, or nul characters are needed in a field they -much be quotes. two mechanisms are available: +must be quoted. Two mechanisms are available: 1/ If a field begins '\x' then it must contain an even number of hex digits, and pairs of these digits provide the bytes in the field. 2/ otherwise a \ in the field must be followed by 3 octal digits which give the code for a byte. Other characters are treated - as them selves. At the very least, space, newlines nul, and + as themselves. At the very least, space, newline, nul, and '\' must be quoted in this way. - diff --git a/trunk/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/trunk/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl index 6dc9d9f622ca..6feef9e82b63 100644 --- a/trunk/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl +++ b/trunk/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl @@ -2836,7 +2836,7 @@ struct _snd_pcm_runtime { Note that this callback became non-atomic since the recent version. - You can use schedule-related fucntions safely in this callback now. + You can use schedule-related functions safely in this callback now.
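
[A rough sketch of the cache_detail operations that the rpc-cache.txt
text above describes. The 'demo' item type, its key/content fields, and
the hash size are hypothetical, and the cache_detail field names are
assumptions based on that era's include/linux/sunrpc/cache.h; the op
signatures follow the documentation.]

	#include <linux/sunrpc/cache.h>
	#include <linux/slab.h>
	#include <linux/kernel.h>

	struct demo_ent {
		struct cache_head h;	/* must be embedded, usually first */
		int key;		/* the 'key' field */
		int content;		/* the 'content' field */
	};

	static struct cache_head *demo_alloc(void)
	{
		struct demo_ent *e = kmalloc(sizeof(*e), GFP_KERNEL);
		return e ? &e->h : NULL;
	}

	static void demo_put(struct kref *ref)
	{
		/* 'ref' is the kref inside the cache_head */
		struct cache_head *h = container_of(ref, struct cache_head, ref);
		kfree(container_of(h, struct demo_ent, h));
	}

	static int demo_match(struct cache_head *orig, struct cache_head *new)
	{
		/* 1 if the keys match, 0 if they don't */
		return container_of(orig, struct demo_ent, h)->key ==
		       container_of(new, struct demo_ent, h)->key;
	}

	static void demo_init(struct cache_head *orig, struct cache_head *new)
	{
		/* set the 'key' fields in 'new' from 'orig' */
		container_of(new, struct demo_ent, h)->key =
			container_of(orig, struct demo_ent, h)->key;
	}

	static void demo_update(struct cache_head *orig, struct cache_head *new)
	{
		/* set the 'content' fields in 'new' from 'orig' */
		container_of(new, struct demo_ent, h)->content =
			container_of(orig, struct demo_ent, h)->content;
	}

	#define DEMO_HASH_SIZE 16
	static struct cache_head *demo_table[DEMO_HASH_SIZE];

	static struct cache_detail demo_cache = {
		.hash_size	= DEMO_HASH_SIZE,
		.hash_table	= demo_table,
		.name		= "demo",
		.cache_put	= demo_put,
		.alloc		= demo_alloc,
		.match		= demo_match,
		.init		= demo_init,
		.update		= demo_update,
	};
	/* registered with cache_register(&demo_cache); */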
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 4e8fbbc5566d..e5b051f0e27e 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -882,13 +882,34 @@ W: http://ebtables.sourceforge.net/ S: Maintained EDAC-CORE -P: Doug Thompson -M: norsk5@xmission.com, dthompson@linuxnetworx.com -P: Dave Peterson -M: dsp@llnl.gov, dave_peterson@pobox.com -L: bluesmoke-devel@lists.sourceforge.net -W: bluesmoke.sourceforge.net -S: Maintained +P: Doug Thompson +M: norsk5@xmission.com, dthompson@linuxnetworx.com +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-E752X +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-E7XXX +P: Dave Peterson +M: dsp@llnl.gov, dave_peterson@pobox.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained + +EDAC-R82600 +P: Tim Small +M: tim@buttersideup.com +L: bluesmoke-devel@lists.sourceforge.net +W: bluesmoke.sourceforge.net +S: Maintained EEPRO100 NETWORK DRIVER P: Andrey V. Savochkin @@ -1039,6 +1060,15 @@ M: khc@pm.waw.pl W: http://www.kernel.org/pub/linux/utils/net/hdlc/ S: Maintained +GIGASET ISDN DRIVERS +P: Hansjoerg Lipp +M: hjlipp@web.de +P: Tilman Schmidt +M: tilman@imap.cc +L: gigaset307x-common@lists.sourceforge.net +W: http://gigaset307x.sourceforge.net/ +S: Maintained + HARDWARE MONITORING P: Jean Delvare M: khali@linux-fr.org @@ -2203,6 +2233,12 @@ M: p_gortmaker@yahoo.com L: linux-kernel@vger.kernel.org S: Maintained +REAL TIME CLOCK (RTC) SUBSYSTEM +P: Alessandro Zummo +M: a.zummo@towertech.it +L: linux-kernel@vger.kernel.org +S: Maintained + REISERFS FILE SYSTEM P: Hans Reiser M: reiserfs-dev@namesys.com diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index eedf41bf7057..9bef61b30367 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -25,6 +25,10 @@ config RWSEM_XCHGADD_ALGORITHM bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -447,6 +451,10 @@ config ALPHA_IRONGATE depends on ALPHA_NAUTILUS default y +config GENERIC_HWEIGHT + bool + default y if !ALPHA_EV6 && !ALPHA_EV67 + config ALPHA_AVANTI bool depends on ALPHA_XL || ALPHA_AVANTI_CH diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index 7fb14f42a125..31afe3d91ac6 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -821,7 +821,6 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, affects all sorts of things, like timeval and itimerval. 
*/ extern struct timezone sys_tz; -extern int do_adjtimex(struct timex *); struct timeval32 { diff --git a/trunk/arch/alpha/kernel/setup.c b/trunk/arch/alpha/kernel/setup.c index b4e5f8ff2b25..dd8769670596 100644 --- a/trunk/arch/alpha/kernel/setup.c +++ b/trunk/arch/alpha/kernel/setup.c @@ -34,6 +34,7 @@ #include #include #include +#include #ifdef CONFIG_MAGIC_SYSRQ #include #include @@ -42,7 +43,7 @@ #include #include -extern struct notifier_block *panic_notifier_list; +extern struct atomic_notifier_head panic_notifier_list; static int alpha_panic_event(struct notifier_block *, unsigned long, void *); static struct notifier_block alpha_panic_block = { alpha_panic_event, @@ -241,9 +242,6 @@ reserve_std_resources(void) request_resource(io, standard_io_resources+i); } -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) #define PFN_MAX PFN_DOWN(0x80000000) #define for_each_mem_cluster(memdesc, cluster, i) \ for ((cluster) = (memdesc)->cluster, (i) = 0; \ @@ -472,11 +470,6 @@ page_is_ram(unsigned long pfn) return 0; } -#undef PFN_UP -#undef PFN_DOWN -#undef PFN_PHYS -#undef PFN_MAX - void __init setup_arch(char **cmdline_p) { @@ -507,7 +500,8 @@ setup_arch(char **cmdline_p) } /* Register a call for panic conditions. */ - notifier_chain_register(&panic_notifier_list, &alpha_panic_block); + atomic_notifier_chain_register(&panic_notifier_list, + &alpha_panic_block); #ifdef CONFIG_ALPHA_GENERIC /* Assume that we've booted from SRM if we haven't booted from MILO. diff --git a/trunk/arch/alpha/kernel/time.c b/trunk/arch/alpha/kernel/time.c index 6b2921be1909..3859749810b4 100644 --- a/trunk/arch/alpha/kernel/time.c +++ b/trunk/arch/alpha/kernel/time.c @@ -314,10 +314,11 @@ time_init(void) if (!est_cycle_freq) est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); - cc1 = rpcc_after_update_in_progress(); + cc1 = rpcc(); /* Calibrate CPU clock -- attempt #2. */ if (!est_cycle_freq) { + cc1 = rpcc_after_update_in_progress(); cc2 = rpcc_after_update_in_progress(); est_cycle_freq = validate_cc_value(cc2 - cc1); cc1 = cc2; diff --git a/trunk/arch/alpha/lib/ev6-memchr.S b/trunk/arch/alpha/lib/ev6-memchr.S index a8e843dbcc23..1a5f71b9d8b1 100644 --- a/trunk/arch/alpha/lib/ev6-memchr.S +++ b/trunk/arch/alpha/lib/ev6-memchr.S @@ -84,7 +84,7 @@ $last_quad: beq $2, $not_found # U : U L U L $found_it: -#if defined(__alpha_fix__) && defined(__alpha_cix__) +#ifdef CONFIG_ALPHA_EV67 /* * Since we are guaranteed to have set one of the bits, we don't * have to worry about coming back with a 0x40 out of cttz... 
diff --git a/trunk/arch/alpha/lib/fpreg.c b/trunk/arch/alpha/lib/fpreg.c index 97c4d9d7a4d5..05017ba34c3c 100644 --- a/trunk/arch/alpha/lib/fpreg.c +++ b/trunk/arch/alpha/lib/fpreg.c @@ -4,7 +4,7 @@ * (C) Copyright 1998 Linus Torvalds */ -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); #else #define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); @@ -53,7 +53,7 @@ alpha_read_fp_reg (unsigned long reg) return val; } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); #else #define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); @@ -98,7 +98,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val) } } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); #else #define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); @@ -147,7 +147,7 @@ alpha_read_fp_reg_s (unsigned long reg) return val; } -#if defined(__alpha_cix__) || defined(__alpha_fix__) +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); #else #define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); diff --git a/trunk/arch/alpha/mm/numa.c b/trunk/arch/alpha/mm/numa.c index 6d5251254f68..bf6b65c81bef 100644 --- a/trunk/arch/alpha/mm/numa.c +++ b/trunk/arch/alpha/mm/numa.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -27,9 +28,6 @@ bootmem_data_t node_bdata[MAX_NUMNODES]; #define DBGDCONT(args...) 
#endif -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) #define for_each_mem_cluster(memdesc, cluster, i) \ for ((cluster) = (memdesc)->cluster, (i) = 0; \ (i) < (memdesc)->numclusters; (i)++, (cluster)++) diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 0dd24ebdf6ac..9731b3f826ab 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration" config ARM bool default y + select RTC_LIB help The ARM series is a line of low-power-consumption RISC chip designs licensed by ARM Ltd and targeted at embedded applications and @@ -53,6 +54,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y @@ -835,6 +840,8 @@ source "drivers/usb/Kconfig" source "drivers/mmc/Kconfig" +source "drivers/rtc/Kconfig" + endmenu source "fs/Kconfig" diff --git a/trunk/arch/arm/common/rtctime.c b/trunk/arch/arm/common/rtctime.c index e851d86c212c..35c9a64ac14c 100644 --- a/trunk/arch/arm/common/rtctime.c +++ b/trunk/arch/arm/common/rtctime.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -42,89 +43,6 @@ static struct rtc_ops *rtc_ops; #define rtc_epoch 1900UL -static const unsigned char days_in_month[] = { - 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 -}; - -#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400) -#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400)) - -static int month_days(unsigned int month, unsigned int year) -{ - return days_in_month[month] + (LEAP_YEAR(year) && month == 1); -} - -/* - * Convert seconds since 01-01-1970 00:00:00 to Gregorian date. - */ -void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) -{ - int days, month, year; - - days = time / 86400; - time -= days * 86400; - - tm->tm_wday = (days + 4) % 7; - - year = 1970 + days / 365; - days -= (year - 1970) * 365 - + LEAPS_THRU_END_OF(year - 1) - - LEAPS_THRU_END_OF(1970 - 1); - if (days < 0) { - year -= 1; - days += 365 + LEAP_YEAR(year); - } - tm->tm_year = year - 1900; - tm->tm_yday = days + 1; - - for (month = 0; month < 11; month++) { - int newdays; - - newdays = days - month_days(month, year); - if (newdays < 0) - break; - days = newdays; - } - tm->tm_mon = month; - tm->tm_mday = days + 1; - - tm->tm_hour = time / 3600; - time -= tm->tm_hour * 3600; - tm->tm_min = time / 60; - tm->tm_sec = time - tm->tm_min * 60; -} -EXPORT_SYMBOL(rtc_time_to_tm); - -/* - * Does the rtc_time represent a valid date/time? - */ -int rtc_valid_tm(struct rtc_time *tm) -{ - if (tm->tm_year < 70 || - tm->tm_mon >= 12 || - tm->tm_mday < 1 || - tm->tm_mday > month_days(tm->tm_mon, tm->tm_year + 1900) || - tm->tm_hour >= 24 || - tm->tm_min >= 60 || - tm->tm_sec >= 60) - return -EINVAL; - - return 0; -} -EXPORT_SYMBOL(rtc_valid_tm); - -/* - * Convert Gregorian date to seconds since 01-01-1970 00:00:00. - */ -int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) -{ - *time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - - return 0; -} -EXPORT_SYMBOL(rtc_tm_to_time); - /* * Calculate the next alarm time given the requested alarm time mask * and the current time. 
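The block deleted above (rtc_time_to_tm(), rtc_valid_tm(), rtc_tm_to_time()) is not lost: equivalent helpers now live in the generic RTC library that the new "select RTC_LIB" in arch/arm/Kconfig pulls in, so callers just include <linux/rtc.h>. A sketch of typical use of those shared helpers (the example function and values are invented for illustration):

        #include <linux/rtc.h>

        static int example_roundtrip(unsigned long secs)
        {
                struct rtc_time tm;
                unsigned long back;

                /* seconds since 1970-01-01 00:00:00 -> broken-down time */
                rtc_time_to_tm(secs, &tm);
                if (rtc_valid_tm(&tm) < 0)
                        return -EINVAL;

                /* and back again */
                rtc_tm_to_time(&tm, &back);
                return back == secs ? 0 : -EINVAL;
        }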
@@ -151,13 +69,13 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc } } -static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm) { memset(tm, 0, sizeof(struct rtc_time)); return ops->read_time(tm); } -static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm) { int ret; @@ -168,7 +86,7 @@ static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) return ret; } -static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) +static inline int rtc_arm_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) { int ret = -EINVAL; if (ops->read_alarm) { @@ -178,7 +96,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) return ret; } -static inline int rtc_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) +static inline int rtc_arm_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) { int ret = -EINVAL; if (ops->set_alarm) @@ -266,7 +184,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, switch (cmd) { case RTC_ALM_READ: - ret = rtc_read_alarm(ops, &alrm); + ret = rtc_arm_read_alarm(ops, &alrm); if (ret) break; ret = copy_to_user(uarg, &alrm.time, sizeof(tm)); @@ -288,11 +206,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, alrm.time.tm_wday = -1; alrm.time.tm_yday = -1; alrm.time.tm_isdst = -1; - ret = rtc_set_alarm(ops, &alrm); + ret = rtc_arm_set_alarm(ops, &alrm); break; case RTC_RD_TIME: - ret = rtc_read_time(ops, &tm); + ret = rtc_arm_read_time(ops, &tm); if (ret) break; ret = copy_to_user(uarg, &tm, sizeof(tm)); @@ -310,7 +228,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ret = -EFAULT; break; } - ret = rtc_set_time(ops, &tm); + ret = rtc_arm_set_time(ops, &tm); break; case RTC_EPOCH_SET: @@ -341,11 +259,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, ret = -EFAULT; break; } - ret = rtc_set_alarm(ops, &alrm); + ret = rtc_arm_set_alarm(ops, &alrm); break; case RTC_WKALM_RD: - ret = rtc_read_alarm(ops, &alrm); + ret = rtc_arm_read_alarm(ops, &alrm); if (ret) break; ret = copy_to_user(uarg, &alrm, sizeof(alrm)); @@ -435,7 +353,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo struct rtc_time tm; char *p = page; - if (rtc_read_time(ops, &tm) == 0) { + if (rtc_arm_read_time(ops, &tm) == 0) { p += sprintf(p, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" @@ -445,7 +363,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo rtc_epoch); } - if (rtc_read_alarm(ops, &alrm) == 0) { + if (rtc_arm_read_alarm(ops, &alrm) == 0) { p += sprintf(p, "alrm_time\t: "); if ((unsigned int)alrm.time.tm_hour <= 24) p += sprintf(p, "%02d:", alrm.time.tm_hour); diff --git a/trunk/arch/arm/lib/copy_template.S b/trunk/arch/arm/lib/copy_template.S index 838e435e4922..cab355c0c1f7 100644 --- a/trunk/arch/arm/lib/copy_template.S +++ b/trunk/arch/arm/lib/copy_template.S @@ -236,7 +236,7 @@ /* - * Abort preanble and completion macros. + * Abort preamble and completion macros. * If a fixup handler is required then those macros must surround it. * It is assumed that the fixup code will handle the private part of * the exit macro. 
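The rtc_read_time/rtc_set_time/rtc_read_alarm/rtc_set_alarm wrappers above gain an rtc_arm_ prefix because the new generic RTC subsystem claims the unprefixed names for its own interface; the ARM-private code keeps working unchanged under the new names. For reference, a hypothetical board driver for this (old-style) ARM rtc_ops interface looks roughly like the following — EXAMPLE_RTC_DR is a made-up register address, and the pattern mirrors the integrator hunks just below:

        #include <linux/module.h>
        #include <linux/rtc.h>
        #include <asm/rtc.h>

        static int example_rtc_read_time(struct rtc_time *tm)
        {
                /* read a free-running seconds counter and convert it */
                rtc_time_to_tm(readl(EXAMPLE_RTC_DR), tm);
                return 0;
        }

        static struct rtc_ops example_rtc_ops = {
                .owner     = THIS_MODULE,
                .read_time = example_rtc_read_time,
                /* .set_time, .read_alarm, .set_alarm as hardware allows */
        };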
diff --git a/trunk/arch/arm/mach-footbridge/time.c b/trunk/arch/arm/mach-footbridge/time.c index 2c64a0b0502e..5d02e95dede3 100644 --- a/trunk/arch/arm/mach-footbridge/time.c +++ b/trunk/arch/arm/mach-footbridge/time.c @@ -34,27 +34,12 @@ static int rtc_base; static unsigned long __init get_isa_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; - int i; // check to see if the RTC makes sense..... if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0) return mktime(1970, 1, 1, 0, 0, 0); - /* The Linux interpretation of the CMOS clock register contents: - * When the Update-In-Progress (UIP) flag goes from 1 to 0, the - * RTC registers show the second which has precisely just started. - * Let's hope other operating systems interpret the RTC the same way. - */ - /* read RTC exactly on falling edge of update flag */ - for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - - for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - - do { /* Isn't this overkill ? UIP above should guarantee consistency */ + do { sec = CMOS_READ(RTC_SECONDS); min = CMOS_READ(RTC_MINUTES); hour = CMOS_READ(RTC_HOURS); diff --git a/trunk/arch/arm/mach-integrator/time.c b/trunk/arch/arm/mach-integrator/time.c index 3c22c16b38bf..bc07f52a6fd7 100644 --- a/trunk/arch/arm/mach-integrator/time.c +++ b/trunk/arch/arm/mach-integrator/time.c @@ -40,13 +40,13 @@ static int integrator_set_rtc(void) return 1; } -static int rtc_read_alarm(struct rtc_wkalrm *alrm) +static int integrator_rtc_read_alarm(struct rtc_wkalrm *alrm) { rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); return 0; } -static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) +static inline int integrator_rtc_set_alarm(struct rtc_wkalrm *alrm) { unsigned long time; int ret; @@ -62,7 +62,7 @@ static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) return ret; } -static int rtc_read_time(struct rtc_time *tm) +static int integrator_rtc_read_time(struct rtc_time *tm) { rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); return 0; @@ -76,7 +76,7 @@ static int rtc_read_time(struct rtc_time *tm) * edge of the 1Hz clock, we must write the time one second * in advance. 
*/ -static inline int rtc_set_time(struct rtc_time *tm) +static inline int integrator_rtc_set_time(struct rtc_time *tm) { unsigned long time; int ret; @@ -90,10 +90,10 @@ static inline int rtc_set_time(struct rtc_time *tm) static struct rtc_ops rtc_ops = { .owner = THIS_MODULE, - .read_time = rtc_read_time, - .set_time = rtc_set_time, - .read_alarm = rtc_read_alarm, - .set_alarm = rtc_set_alarm, + .read_time = integrator_rtc_read_time, + .set_time = integrator_rtc_set_time, + .read_alarm = integrator_rtc_read_alarm, + .set_alarm = integrator_rtc_set_alarm, }; static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id, diff --git a/trunk/arch/arm/mach-omap1/board-netstar.c b/trunk/arch/arm/mach-omap1/board-netstar.c index 60d5f8a3339c..7520e602d7a2 100644 --- a/trunk/arch/arm/mach-omap1/board-netstar.c +++ b/trunk/arch/arm/mach-omap1/board-netstar.c @@ -141,7 +141,7 @@ static int __init netstar_late_init(void) /* TODO: Setup front panel switch here */ /* Setup panic notifier */ - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } diff --git a/trunk/arch/arm/mach-omap1/board-voiceblue.c b/trunk/arch/arm/mach-omap1/board-voiceblue.c index bfd5fdd1a875..52e4a9d69642 100644 --- a/trunk/arch/arm/mach-omap1/board-voiceblue.c +++ b/trunk/arch/arm/mach-omap1/board-voiceblue.c @@ -235,7 +235,7 @@ static struct notifier_block panic_block = { static int __init voiceblue_setup(void) { /* Setup panic notifier */ - notifier_chain_register(&panic_notifier_list, &panic_block); + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); return 0; } diff --git a/trunk/arch/arm/mach-pxa/generic.c b/trunk/arch/arm/mach-pxa/generic.c index 9b48a90aefce..5efa84749f37 100644 --- a/trunk/arch/arm/mach-pxa/generic.c +++ b/trunk/arch/arm/mach-pxa/generic.c @@ -319,6 +319,11 @@ void __init pxa_set_ficp_info(struct pxaficp_platform_data *info) pxaficp_device.dev.platform_data = info; } +static struct platform_device pxartc_device = { + .name = "sa1100-rtc", + .id = -1, +}; + static struct platform_device *devices[] __initdata = { &pxamci_device, &udc_device, @@ -329,6 +334,7 @@ static struct platform_device *devices[] __initdata = { &pxaficp_device, &i2c_device, &i2s_device, + &pxartc_device, }; static int __init pxa_init(void) diff --git a/trunk/arch/arm/mach-sa1100/generic.c b/trunk/arch/arm/mach-sa1100/generic.c index 2abdc419e984..9ea71551fc04 100644 --- a/trunk/arch/arm/mach-sa1100/generic.c +++ b/trunk/arch/arm/mach-sa1100/generic.c @@ -324,6 +324,11 @@ void sa11x0_set_irda_data(struct irda_platform_data *irda) sa11x0ir_device.dev.platform_data = irda; } +static struct platform_device sa11x0rtc_device = { + .name = "sa1100-rtc", + .id = -1, +}; + static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0udc_device, &sa11x0uart1_device, @@ -333,6 +338,7 @@ static struct platform_device *sa11x0_devices[] __initdata = { &sa11x0pcmcia_device, &sa11x0fb_device, &sa11x0mtd_device, + &sa11x0rtc_device, }; static int __init sa1100_init(void) diff --git a/trunk/arch/arm26/Kconfig b/trunk/arch/arm26/Kconfig index dee23d87fc5a..cf4ebf4c274d 100644 --- a/trunk/arch/arm26/Kconfig +++ b/trunk/arch/arm26/Kconfig @@ -41,6 +41,10 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/trunk/arch/arm26/kernel/traps.c b/trunk/arch/arm26/kernel/traps.c index 5847ea5d7747..a79de041b50e 100644 --- 
a/trunk/arch/arm26/kernel/traps.c +++ b/trunk/arch/arm26/kernel/traps.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include "ptrace.h" @@ -207,19 +207,19 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err) die(str, regs, err); } -static DECLARE_MUTEX(undef_sem); +static DEFINE_MUTEX(undef_mutex); static int (*undef_hook)(struct pt_regs *); int request_undef_hook(int (*fn)(struct pt_regs *)) { int ret = -EBUSY; - down(&undef_sem); + mutex_lock(&undef_mutex); if (undef_hook == NULL) { undef_hook = fn; ret = 0; } - up(&undef_sem); + mutex_unlock(&undef_mutex); return ret; } @@ -228,12 +228,12 @@ int release_undef_hook(int (*fn)(struct pt_regs *)) { int ret = -EINVAL; - down(&undef_sem); + mutex_lock(&undef_mutex); if (undef_hook == fn) { undef_hook = NULL; ret = 0; } - up(&undef_sem); + mutex_unlock(&undef_mutex); return ret; } diff --git a/trunk/arch/arm26/mm/init.c b/trunk/arch/arm26/mm/init.c index e3ecaa453747..7da8a5205678 100644 --- a/trunk/arch/arm26/mm/init.c +++ b/trunk/arch/arm26/mm/init.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -101,12 +102,6 @@ struct node_info { int bootmap_pages; }; -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) -#define PFN_SIZE(x) ((x) >> PAGE_SHIFT) -#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \ - (((unsigned long)(s)) & PAGE_MASK)) - /* * FIXME: We really want to avoid allocating the bootmap bitmap * over the top of the initrd. Hopefully, this is located towards diff --git a/trunk/arch/cris/Kconfig b/trunk/arch/cris/Kconfig index b83261949737..856b665020e7 100644 --- a/trunk/arch/cris/Kconfig +++ b/trunk/arch/cris/Kconfig @@ -16,6 +16,14 @@ config RWSEM_GENERIC_SPINLOCK config RWSEM_XCHGADD_ALGORITHM bool +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/trunk/arch/cris/kernel/setup.c b/trunk/arch/cris/kernel/setup.c index 1ba57efff60d..619a6eefd893 100644 --- a/trunk/arch/cris/kernel/setup.c +++ b/trunk/arch/cris/kernel/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -88,10 +89,6 @@ setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; -#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) -#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) -#define PFN_PHYS(x) ((x) << PAGE_SHIFT) - /* min_low_pfn points to the start of DRAM, start_pfn points * to the first DRAM pages after the kernel, and max_low_pfn * to the end of DRAM. 
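The traps.c change above follows the standard recipe for the semaphore-to-mutex conversions that recur throughout this patch (fidvid_mutex, mtrr_mutex, and microcode_mutex below): any semaphore used strictly as a binary sleeping lock becomes a struct mutex. The pattern, in brief, with invented names:

        #include <linux/mutex.h>

        static DEFINE_MUTEX(example_mutex);     /* was: static DECLARE_MUTEX(example_sem); */

        static void example(void)
        {
                mutex_lock(&example_mutex);     /* was: down(&example_sem); */
                /* ... critical section ... */
                mutex_unlock(&example_mutex);   /* was: up(&example_sem); */
        }

Counting semaphores and semaphores used across contexts are not candidates; only mutex-style usage converts.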
diff --git a/trunk/arch/frv/Kconfig b/trunk/arch/frv/Kconfig index e08383712370..95a3892b8d1b 100644 --- a/trunk/arch/frv/Kconfig +++ b/trunk/arch/frv/Kconfig @@ -17,6 +17,10 @@ config GENERIC_FIND_NEXT_BIT bool default y +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default n diff --git a/trunk/arch/frv/mm/mmu-context.c b/trunk/arch/frv/mm/mmu-context.c index f2c6866fc88b..1530a4111e6d 100644 --- a/trunk/arch/frv/mm/mmu-context.c +++ b/trunk/arch/frv/mm/mmu-context.c @@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx) /* find the first unallocated context number * - 0 is reserved for the kernel */ - cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1); + cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1); if (cxn < NR_CXN) { - set_bit(cxn, &cxn_bitmap); + set_bit(cxn, cxn_bitmap); } else { /* none remaining - need to steal someone else's cxn */ @@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm) cxn_pinned = -1; list_del_init(&ctx->id_link); - clear_bit(ctx->id, &cxn_bitmap); + clear_bit(ctx->id, cxn_bitmap); __flush_tlb_mm(ctx->id); ctx->id = 0; } diff --git a/trunk/arch/h8300/Kconfig b/trunk/arch/h8300/Kconfig index 98308b018a35..cabf0bfffc53 100644 --- a/trunk/arch/h8300/Kconfig +++ b/trunk/arch/h8300/Kconfig @@ -29,6 +29,14 @@ config RWSEM_XCHGADD_ALGORITHM bool default n +config GENERIC_FIND_NEXT_BIT + bool + default y + +config GENERIC_HWEIGHT + bool + default y + config GENERIC_CALIBRATE_DELAY bool default y diff --git a/trunk/arch/i386/Kconfig b/trunk/arch/i386/Kconfig index b008fb0cd7b7..f17bd1d2707e 100644 --- a/trunk/arch/i386/Kconfig +++ b/trunk/arch/i386/Kconfig @@ -37,6 +37,10 @@ config GENERIC_IOMAP bool default y +config GENERIC_HWEIGHT + bool + default y + config ARCH_MAY_HAVE_PC_FDC bool default y @@ -227,6 +231,15 @@ config SCHED_SMT cost of slightly increased overhead in some places. If unsure say N here. +config SCHED_MC + bool "Multi-core scheduler support" + depends on SMP + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. 
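The SCHED_MC option introduced above ties in with the intel_cacheinfo.c hunks further below, which record which last-level cache each CPU sits behind in the cpu_llc_id[] array this series introduces; the scheduler can use that to build an extra domain level between SMT siblings and physical packages. A rough sketch of the relationship (the helper name is invented for illustration, assuming the cpu_llc_id[] array):

        /* CPUs behind the same last-level cache can be grouped into
         * one multi-core (MC) scheduling domain. */
        static int example_cpus_share_cache(int a, int b)
        {
                return cpu_llc_id[a] == cpu_llc_id[b];
        }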
+ source "kernel/Kconfig.preempt" config X86_UP_APIC diff --git a/trunk/arch/i386/Makefile b/trunk/arch/i386/Makefile index c848a5b30391..3e4adb1e2244 100644 --- a/trunk/arch/i386/Makefile +++ b/trunk/arch/i386/Makefile @@ -103,7 +103,7 @@ AFLAGS += $(mflags-y) boot := arch/i386/boot PHONY += zImage bzImage compressed zlilo bzlilo \ - zdisk bzdisk fdimage fdimage144 fdimage288 install + zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install all: bzImage @@ -122,7 +122,7 @@ zlilo bzlilo: vmlinux zdisk bzdisk: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk -fdimage fdimage144 fdimage288: vmlinux +fdimage fdimage144 fdimage288 isoimage: vmlinux $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ install: @@ -139,6 +139,9 @@ define archhelp echo ' install to $$(INSTALL_PATH) and run lilo' echo ' bzdisk - Create a boot floppy in /dev/fd0' echo ' fdimage - Create a boot floppy image' + echo ' isoimage - Create a boot CD-ROM image' endef -CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf +CLEAN_FILES += arch/$(ARCH)/boot/fdimage \ + arch/$(ARCH)/boot/image.iso \ + arch/$(ARCH)/boot/mtools.conf diff --git a/trunk/arch/i386/boot/Makefile b/trunk/arch/i386/boot/Makefile index f136752563b1..33e55476381b 100644 --- a/trunk/arch/i386/boot/Makefile +++ b/trunk/arch/i386/boot/Makefile @@ -62,8 +62,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE $(obj)/compressed/vmlinux: FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ -# Set this if you want to pass append arguments to the zdisk/fdimage kernel +# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel FDARGS = +# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel +FDINITRD = + +image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,) $(obj)/mtools.conf: $(src)/mtools.conf.in sed -e 's|@OBJ@|$(obj)|g' < $< > $@ @@ -72,8 +76,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in zdisk: $(BOOTIMAGE) $(obj)/mtools.conf MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync syslinux /dev/fd0 ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync # These require being root or having syslinux 2.02 or higher installed @@ -81,18 +88,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync syslinux $(obj)/fdimage ; sync - echo 'default linux $(FDARGS)' | \ + echo '$(image_cmdline)' | \ MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \ + fi MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync +isoimage: $(BOOTIMAGE) + -rm -rf $(obj)/isoimage + mkdir $(obj)/isoimage + cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` 
\ + $(obj)/isoimage + cp $(BOOTIMAGE) $(obj)/isoimage/linux + echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg + if [ -f '$(FDINITRD)' ] ; then \ + cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \ + fi + mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \ + -no-emul-boot -boot-load-size 4 -boot-info-table \ + $(obj)/isoimage + rm -rf $(obj)/isoimage + zlilo: $(BOOTIMAGE) if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi diff --git a/trunk/arch/i386/boot/video.S b/trunk/arch/i386/boot/video.S index 2ac40c8244c4..0000a2674537 100644 --- a/trunk/arch/i386/boot/video.S +++ b/trunk/arch/i386/boot/video.S @@ -1924,6 +1924,7 @@ skip10: movb %ah, %al ret store_edid: +#ifdef CONFIG_FB_FIRMWARE_EDID pushw %es # just save all registers pushw %ax pushw %bx @@ -1954,6 +1955,7 @@ store_edid: popw %bx popw %ax popw %es +#endif ret # VIDEO_SELECT-only variables diff --git a/trunk/arch/i386/kernel/acpi/boot.c b/trunk/arch/i386/kernel/acpi/boot.c index f1a21945963d..033066176b3e 100644 --- a/trunk/arch/i386/kernel/acpi/boot.c +++ b/trunk/arch/i386/kernel/acpi/boot.c @@ -668,10 +668,10 @@ unsigned long __init acpi_find_rsdp(void) unsigned long rsdp_phys = 0; if (efi_enabled) { - if (efi.acpi20) - return __pa(efi.acpi20); - else if (efi.acpi) - return __pa(efi.acpi); + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + return efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + return efi.acpi; } /* * Scan memory looking for the RSDP signature. First search EBDA (low diff --git a/trunk/arch/i386/kernel/cpu/common.c b/trunk/arch/i386/kernel/cpu/common.c index 7e3d6b6a4e96..a06a49075f10 100644 --- a/trunk/arch/i386/kernel/cpu/common.c +++ b/trunk/arch/i386/kernel/cpu/common.c @@ -266,7 +266,7 @@ static void __init early_cpu_detect(void) void __cpuinit generic_identify(struct cpuinfo_x86 * c) { u32 tfms, xlvl; - int junk; + int ebx; if (have_cpuid_p()) { /* Get vendor name */ @@ -282,7 +282,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) /* Intel-defined flags: level 0x00000001 */ if ( c->cpuid_level >= 0x00000001 ) { u32 capability, excap; - cpuid(0x00000001, &tfms, &junk, &excap, &capability); + cpuid(0x00000001, &tfms, &ebx, &excap, &capability); c->x86_capability[0] = capability; c->x86_capability[4] = excap; c->x86 = (tfms >> 8) & 15; @@ -292,6 +292,11 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; c->x86_mask = tfms & 15; +#ifdef CONFIG_SMP + c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); +#else + c->apicid = (ebx >> 24) & 0xFF; +#endif } else { /* Have CPUID level 0 only - unheard of */ c->x86 = 4; @@ -474,7 +479,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) cpuid(1, &eax, &ebx, &ecx, &edx); - c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; diff --git a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index e5bc06480ff9..712a26bd4457 100644 --- a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -40,6 +40,7 @@ #ifdef CONFIG_X86_POWERNOW_K8_ACPI #include +#include #include #endif @@ -49,7 +50,7 @@ #include "powernow-k8.h" /* serialize freq changes */ -static DECLARE_MUTEX(fidvid_sem); +static DEFINE_MUTEX(fidvid_mutex); static struct powernow_k8_data 
*powernow_data[NR_CPUS]; @@ -943,17 +944,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) goto err_out; - down(&fidvid_sem); + mutex_lock(&fidvid_mutex); powernow_k8_acpi_pst_values(data, newstate); if (transition_frequency(data, newstate)) { printk(KERN_ERR PFX "transition frequency failed\n"); ret = 1; - up(&fidvid_sem); + mutex_unlock(&fidvid_mutex); goto err_out; } - up(&fidvid_sem); + mutex_unlock(&fidvid_mutex); pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; @@ -1094,10 +1095,15 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol) static unsigned int powernowk8_get (unsigned int cpu) { - struct powernow_k8_data *data = powernow_data[cpu]; + struct powernow_k8_data *data; cpumask_t oldmask = current->cpus_allowed; unsigned int khz = 0; + data = powernow_data[first_cpu(cpu_core_map[cpu])]; + + if (!data) + return -EINVAL; + set_cpus_allowed(current, cpumask_of_cpu(cpu)); if (smp_processor_id() != cpu) { printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu); diff --git a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.h index 00ea899c17e1..79a7c5c87edc 100644 --- a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.h +++ b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.h @@ -182,10 +182,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); -#ifndef for_each_cpu_mask -#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++) -#endif - #ifdef CONFIG_SMP static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) { diff --git a/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c b/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c index ce61921369e5..9df87b03612c 100644 --- a/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ + unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; +#ifdef CONFIG_SMP + unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); +#endif if (c->cpuid_level > 3) { static int is_initialized; @@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) break; case 2: new_l2 = this_leaf.size/1024; + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + l2_id = c->apicid >> index_msb; break; case 3: new_l3 = this_leaf.size/1024; + num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; + index_msb = get_count_order(num_threads_sharing); + l3_id = c->apicid >> index_msb; break; default: break; @@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) } } } - if (c->cpuid_level > 1) { + /* + * Don't use cpuid2 if cpuid4 is supported. 
For P4, we use cpuid2 for + * trace cache + */ + if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int i, j, n; int regs[4]; unsigned char *dp = (unsigned char *)regs; + int only_trace = 0; + + if (num_cache_leaves != 0 && c->x86 == 15) + only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; @@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { + if (only_trace && cache_table[k].cache_type != LVL_TRACE) + break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; @@ -266,34 +286,45 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) } } } + } - if (new_l1d) - l1d = new_l1d; + if (new_l1d) + l1d = new_l1d; - if (new_l1i) - l1i = new_l1i; + if (new_l1i) + l1i = new_l1i; - if (new_l2) - l2 = new_l2; + if (new_l2) { + l2 = new_l2; +#ifdef CONFIG_SMP + cpu_llc_id[cpu] = l2_id; +#endif + } - if (new_l3) - l3 = new_l3; + if (new_l3) { + l3 = new_l3; +#ifdef CONFIG_SMP + cpu_llc_id[cpu] = l3_id; +#endif + } - if ( trace ) - printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); - else if ( l1i ) - printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); - if ( l1d ) - printk(", L1 D cache: %dK\n", l1d); - else - printk("\n"); - if ( l2 ) - printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); - if ( l3 ) - printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); + if (trace) + printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); + else if ( l1i ) + printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); - c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); - } + if (l1d) + printk(", L1 D cache: %dK\n", l1d); + else + printk("\n"); + + if (l2) + printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); + + if (l3) + printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); + + c->x86_cache_size = l3 ? l3 : (l2 ? 
l2 : (l1i+l1d)); return l2; } diff --git a/trunk/arch/i386/kernel/cpu/mtrr/main.c b/trunk/arch/i386/kernel/cpu/mtrr/main.c index 3b4618bed70d..fff90bda4733 100644 --- a/trunk/arch/i386/kernel/cpu/mtrr/main.c +++ b/trunk/arch/i386/kernel/cpu/mtrr/main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -47,7 +48,7 @@ u32 num_var_ranges = 0; unsigned int *usage_table; -static DECLARE_MUTEX(mtrr_sem); +static DEFINE_MUTEX(mtrr_mutex); u32 size_or_mask, size_and_mask; @@ -333,7 +334,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); /* Search for existing MTRR */ - down(&mtrr_sem); + mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { mtrr_if->get(i, &lbase, &lsize, <ype); if (base >= lbase + lsize) @@ -371,7 +372,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, printk(KERN_INFO "mtrr: no more MTRRs available\n"); error = i; out: - up(&mtrr_sem); + mutex_unlock(&mtrr_mutex); unlock_cpu_hotplug(); return error; } @@ -464,7 +465,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ lock_cpu_hotplug(); - down(&mtrr_sem); + mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { @@ -503,7 +504,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) set_mtrr(reg, 0, 0, 0); error = reg; out: - up(&mtrr_sem); + mutex_unlock(&mtrr_mutex); unlock_cpu_hotplug(); return error; } @@ -685,7 +686,7 @@ void mtrr_ap_init(void) if (!mtrr_if || !use_intel()) return; /* - * Ideally we should hold mtrr_sem here to avoid mtrr entries changed, + * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, * but this routine will be called in cpu boot time, holding the lock * breaks it. This routine is called in two cases: 1.very earily time * of software resume, when there absolutely isn't mtrr entry changes; diff --git a/trunk/arch/i386/kernel/dmi_scan.c b/trunk/arch/i386/kernel/dmi_scan.c index ebc8dc116c43..5efceebc48dc 100644 --- a/trunk/arch/i386/kernel/dmi_scan.c +++ b/trunk/arch/i386/kernel/dmi_scan.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -185,47 +186,72 @@ static void __init dmi_decode(struct dmi_header *dm) } } -void __init dmi_scan_machine(void) +static int __init dmi_present(char __iomem *p) { u8 buf[15]; - char __iomem *p, *q; + memcpy_fromio(buf, p, 15); + if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { + u16 num = (buf[13] << 8) | buf[12]; + u16 len = (buf[7] << 8) | buf[6]; + u32 base = (buf[11] << 24) | (buf[10] << 16) | + (buf[9] << 8) | buf[8]; - /* - * no iounmap() for that ioremap(); it would be a no-op, but it's - * so early in setup that sucker gets confused into doing what - * it shouldn't if we actually call it. - */ - p = ioremap(0xF0000, 0x10000); - if (p == NULL) - goto out; - - for (q = p; q < p + 0x10000; q += 16) { - memcpy_fromio(buf, q, 15); - if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { - u16 num = (buf[13] << 8) | buf[12]; - u16 len = (buf[7] << 8) | buf[6]; - u32 base = (buf[11] << 24) | (buf[10] << 16) | - (buf[9] << 8) | buf[8]; - - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we don't know at this point. 
- */ - if (buf[14] != 0) - printk(KERN_INFO "DMI %d.%d present.\n", - buf[14] >> 4, buf[14] & 0xF); - else - printk(KERN_INFO "DMI present.\n"); + /* + * DMI version 0.0 means that the real version is taken from + * the SMBIOS version, which we don't know at this point. + */ + if (buf[14] != 0) + printk(KERN_INFO "DMI %d.%d present.\n", + buf[14] >> 4, buf[14] & 0xF); + else + printk(KERN_INFO "DMI present.\n"); + if (dmi_table(base,len, num, dmi_decode) == 0) + return 0; + } + return 1; +} - if (dmi_table(base,len, num, dmi_decode) == 0) +void __init dmi_scan_machine(void) +{ + char __iomem *p, *q; + int rc; + + if (efi_enabled) { + if (efi.smbios == EFI_INVALID_TABLE_ADDR) + goto out; + + /* This is called as a core_initcall() because it isn't + * needed during early boot. This also means we can + * iounmap the space when we're done with it. + */ + p = dmi_ioremap(efi.smbios, 32); + if (p == NULL) + goto out; + + rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ + dmi_iounmap(p, 32); + if (!rc) + return; + } + else { + /* + * no iounmap() for that ioremap(); it would be a no-op, but + * it's so early in setup that sucker gets confused into doing + * what it shouldn't if we actually call it. + */ + p = dmi_ioremap(0xF0000, 0x10000); + if (p == NULL) + goto out; + + for (q = p; q < p + 0x10000; q += 16) { + rc = dmi_present(q); + if (!rc) return; } } - -out: printk(KERN_INFO "DMI not present or invalid.\n"); + out: printk(KERN_INFO "DMI not present or invalid.\n"); } - /** * dmi_check_system - check system DMI data * @list: array of dmi_system_id structures to match against diff --git a/trunk/arch/i386/kernel/efi.c b/trunk/arch/i386/kernel/efi.c index 7ec6cfa01fb3..9202b67c4b2e 100644 --- a/trunk/arch/i386/kernel/efi.c +++ b/trunk/arch/i386/kernel/efi.c @@ -361,7 +361,7 @@ void __init efi_init(void) */ c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2); if (c16) { - for (i = 0; i < sizeof(vendor) && *c16; ++i) + for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i) vendor[i] = *c16++; vendor[i] = '\0'; } else @@ -381,29 +381,38 @@ void __init efi_init(void) if (config_tables == NULL) printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n"); + efi.mps = EFI_INVALID_TABLE_ADDR; + efi.acpi = EFI_INVALID_TABLE_ADDR; + efi.acpi20 = EFI_INVALID_TABLE_ADDR; + efi.smbios = EFI_INVALID_TABLE_ADDR; + efi.sal_systab = EFI_INVALID_TABLE_ADDR; + efi.boot_info = EFI_INVALID_TABLE_ADDR; + efi.hcdp = EFI_INVALID_TABLE_ADDR; + efi.uga = EFI_INVALID_TABLE_ADDR; + for (i = 0; i < num_config_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { - efi.mps = (void *)config_tables[i].table; + efi.mps = config_tables[i].table; printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { - efi.acpi20 = __va(config_tables[i].table); + efi.acpi20 = config_tables[i].table; printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { - efi.acpi = __va(config_tables[i].table); + efi.acpi = config_tables[i].table; printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { - efi.smbios = (void *) config_tables[i].table; + efi.smbios = config_tables[i].table; printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { - efi.hcdp = (void *)config_tables[i].table; + efi.hcdp = 
config_tables[i].table; printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) { - efi.uga = (void *)config_tables[i].table; + efi.uga = config_tables[i].table; printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table); } } diff --git a/trunk/arch/i386/kernel/io_apic.c b/trunk/arch/i386/kernel/io_apic.c index 311b4e7266f1..3b329af4afc5 100644 --- a/trunk/arch/i386/kernel/io_apic.c +++ b/trunk/arch/i386/kernel/io_apic.c @@ -381,7 +381,7 @@ static void do_irq_balance(void) unsigned long imbalance = 0; cpumask_t allowed_mask, target_cpu_mask, tmp; - for_each_cpu(i) { + for_each_possible_cpu(i) { int package_index; CPU_IRQ(i) = 0; if (!cpu_online(i)) @@ -632,7 +632,7 @@ static int __init balanced_irq_init(void) else printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); failed: - for_each_cpu(i) { + for_each_possible_cpu(i) { kfree(irq_cpu_data[i].irq_delta); irq_cpu_data[i].irq_delta = NULL; kfree(irq_cpu_data[i].last_irq); diff --git a/trunk/arch/i386/kernel/kprobes.c b/trunk/arch/i386/kernel/kprobes.c index 7a59050242a7..f19768789e8a 100644 --- a/trunk/arch/i386/kernel/kprobes.c +++ b/trunk/arch/i386/kernel/kprobes.c @@ -35,12 +35,56 @@ #include #include #include +#include void jprobe_return_end(void); DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); +/* insert a jmp code */ +static inline void set_jmp_op(void *from, void *to) +{ + struct __arch_jmp_op { + char op; + long raddr; + } __attribute__((packed)) *jop; + jop = (struct __arch_jmp_op *)from; + jop->raddr = (long)(to) - ((long)(from) + 5); + jop->op = RELATIVEJUMP_INSTRUCTION; +} + +/* + * returns non-zero if opcodes can be boosted. + */ +static inline int can_boost(kprobe_opcode_t opcode) +{ + switch (opcode & 0xf0 ) { + case 0x70: + return 0; /* can't boost conditional jump */ + case 0x90: + /* can't boost call and pushf */ + return opcode != 0x9a && opcode != 0x9c; + case 0xc0: + /* can't boost undefined opcodes and soft-interruptions */ + return (0xc1 < opcode && opcode < 0xc6) || + (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf; + case 0xd0: + /* can boost AA* and XLAT */ + return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); + case 0xe0: + /* can boost in/out and (may be) jmps */ + return (0xe3 < opcode && opcode != 0xe8); + case 0xf0: + /* clear and set flags can be boost */ + return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); + default: + /* currently, can't boost 2 bytes opcodes */ + return opcode != 0x0f; + } +} + + /* * returns non-zero if opcode modifies the interrupt flag. 
*/ @@ -65,6 +109,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); p->opcode = *p->addr; + if (can_boost(p->opcode)) { + p->ainsn.boostable = 0; + } else { + p->ainsn.boostable = -1; + } return 0; } @@ -155,9 +204,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; - kprobe_opcode_t *addr = NULL; - unsigned long *lp; + kprobe_opcode_t *addr; struct kprobe_ctlblk *kcb; +#ifdef CONFIG_PREEMPT + unsigned pre_preempt_count = preempt_count(); +#endif /* CONFIG_PREEMPT */ + + addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); /* * We don't want to be preempted for the entire @@ -166,17 +219,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) preempt_disable(); kcb = get_kprobe_ctlblk(); - /* Check if the application is using LDT entry for its code segment and - * calculate the address by reading the base address from the LDT entry. - */ - if ((regs->xcs & 4) && (current->mm)) { - lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8) - + (char *) current->mm->context.ldt); - addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip - - sizeof(kprobe_opcode_t)); - } else { - addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); - } /* Check we're not actually recursing */ if (kprobe_running()) { p = get_kprobe(addr); @@ -252,6 +294,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) /* handler has already set things up, so skip ss setup */ return 1; + if (p->ainsn.boostable == 1 && +#ifdef CONFIG_PREEMPT + !(pre_preempt_count) && /* + * This enables booster when the direct + * execution path aren't preempted. + */ +#endif /* CONFIG_PREEMPT */ + !p->post_handler && !p->break_handler ) { + /* Boost up -- we can execute copied instructions directly */ + reset_current_kprobe(); + regs->eip = (unsigned long)p->ainsn.insn; + preempt_enable_no_resched(); + return 1; + } + ss_probe: prepare_singlestep(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; @@ -267,17 +324,44 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) * here. When a retprobed function returns, this probe is hit and * trampoline_probe_handler() runs, calling the kretprobe's handler. 
*/ - void kretprobe_trampoline_holder(void) + void __kprobes kretprobe_trampoline_holder(void) { - asm volatile ( ".global kretprobe_trampoline\n" + asm volatile ( ".global kretprobe_trampoline\n" "kretprobe_trampoline: \n" - "nop\n"); - } + " pushf\n" + /* skip cs, eip, orig_eax, es, ds */ + " subl $20, %esp\n" + " pushl %eax\n" + " pushl %ebp\n" + " pushl %edi\n" + " pushl %esi\n" + " pushl %edx\n" + " pushl %ecx\n" + " pushl %ebx\n" + " movl %esp, %eax\n" + " call trampoline_handler\n" + /* move eflags to cs */ + " movl 48(%esp), %edx\n" + " movl %edx, 44(%esp)\n" + /* save true return address on eflags */ + " movl %eax, 48(%esp)\n" + " popl %ebx\n" + " popl %ecx\n" + " popl %edx\n" + " popl %esi\n" + " popl %edi\n" + " popl %ebp\n" + " popl %eax\n" + /* skip eip, orig_eax, es, ds */ + " addl $16, %esp\n" + " popf\n" + " ret\n"); +} /* - * Called when we hit the probe point at kretprobe_trampoline + * Called from kretprobe_trampoline */ -int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head; @@ -306,8 +390,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) /* another task is sharing our hash bucket */ continue; - if (ri->rp && ri->rp->handler) + if (ri->rp && ri->rp->handler){ + __get_cpu_var(current_kprobe) = &ri->rp->kp; ri->rp->handler(ri, regs); + __get_cpu_var(current_kprobe) = NULL; + } orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri); @@ -322,18 +409,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) } BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); - regs->eip = orig_ret_address; - reset_current_kprobe(); spin_unlock_irqrestore(&kretprobe_lock, flags); - preempt_enable_no_resched(); - /* - * By returning a non-zero value, we are telling - * kprobe_handler() that we don't want the post_handler - * to run (and have re-enabled preemption) - */ - return 1; + return (void*)orig_ret_address; } /* @@ -357,15 +436,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) * 2) If the single-stepped instruction was a call, the return address * that is atop the stack is the address following the copied instruction. * We need to make it the address following the original instruction. + * + * This function also checks instruction size for preparing direct execution. */ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = (unsigned long *)®s->esp; - unsigned long next_eip = 0; unsigned long copy_eip = (unsigned long)p->ainsn.insn; unsigned long orig_eip = (unsigned long)p->addr; + regs->eflags &= ~TF_MASK; switch (p->ainsn.insn[0]) { case 0x9c: /* pushfl */ *tos &= ~(TF_MASK | IF_MASK); @@ -375,37 +456,51 @@ static void __kprobes resume_execution(struct kprobe *p, case 0xcb: case 0xc2: case 0xca: - regs->eflags &= ~TF_MASK; - /* eip is already adjusted, no more changes required*/ - return; + case 0xea: /* jmp absolute -- eip is correct */ + /* eip is already adjusted, no more changes required */ + p->ainsn.boostable = 1; + goto no_change; case 0xe8: /* call relative - Fix return addr */ *tos = orig_eip + (*tos - copy_eip); break; case 0xff: if ((p->ainsn.insn[1] & 0x30) == 0x10) { /* call absolute, indirect */ - /* Fix return addr; eip is correct. */ - next_eip = regs->eip; + /* + * Fix return addr; eip is correct. 
+ * But this is not boostable + */ *tos = orig_eip + (*tos - copy_eip); + goto no_change; } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ - /* eip is correct. */ - next_eip = regs->eip; + /* eip is correct. And this is boostable */ + p->ainsn.boostable = 1; + goto no_change; } - break; - case 0xea: /* jmp absolute -- eip is correct */ - next_eip = regs->eip; - break; default: break; } - regs->eflags &= ~TF_MASK; - if (next_eip) { - regs->eip = next_eip; - } else { - regs->eip = orig_eip + (regs->eip - copy_eip); + if (p->ainsn.boostable == 0) { + if ((regs->eip > copy_eip) && + (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) { + /* + * These instructions can be executed directly if it + * jumps back to correct address. + */ + set_jmp_op((void *)regs->eip, + (void *)orig_eip + (regs->eip - copy_eip)); + p->ainsn.boostable = 1; + } else { + p->ainsn.boostable = -1; + } } + + regs->eip = orig_eip + (regs->eip - copy_eip); + +no_change: + return; } /* @@ -453,15 +548,57 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs, kcb); + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the eip points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->eip = (unsigned long)cur->addr; regs->eflags |= kcb->kprobe_old_eflags; - - reset_current_kprobe(); + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if (fixup_exception(regs)) + return 1; + + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. 
+ */ + break; + default: + break; } return 0; } @@ -475,6 +612,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch (val) { case DIE_INT3: if (kprobe_handler(args->regs)) @@ -564,12 +704,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } -static struct kprobe trampoline_p = { - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, - .pre_handler = trampoline_probe_handler -}; - int __init arch_init_kprobes(void) { - return register_kprobe(&trampoline_p); + return 0; } diff --git a/trunk/arch/i386/kernel/microcode.c b/trunk/arch/i386/kernel/microcode.c index 55bc365b8753..e7c138f66c5a 100644 --- a/trunk/arch/i386/kernel/microcode.c +++ b/trunk/arch/i386/kernel/microcode.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include @@ -114,7 +115,7 @@ MODULE_LICENSE("GPL"); static DEFINE_SPINLOCK(microcode_update_lock); /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ -static DECLARE_MUTEX(microcode_sem); +static DEFINE_MUTEX(microcode_mutex); static void __user *user_buffer; /* user area microcode data buffer */ static unsigned int user_buffer_size; /* it's size */ @@ -444,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ return -EINVAL; } - down(µcode_sem); + mutex_lock(µcode_mutex); user_buffer = (void __user *) buf; user_buffer_size = (int) len; @@ -453,31 +454,14 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ if (!ret) ret = (ssize_t)len; - up(µcode_sem); + mutex_unlock(µcode_mutex); return ret; } -static int microcode_ioctl (struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - /* - * XXX: will be removed after microcode_ctl - * is updated to ignore failure of this ioctl() - */ - case MICROCODE_IOCFREE: - return 0; - default: - return -EINVAL; - } - return -EINVAL; -} - static struct file_operations microcode_fops = { .owner = THIS_MODULE, .write = microcode_write, - .ioctl = microcode_ioctl, .open = microcode_open, }; diff --git a/trunk/arch/i386/kernel/nmi.c b/trunk/arch/i386/kernel/nmi.c index 9074818b9473..d43b498ec745 100644 --- a/trunk/arch/i386/kernel/nmi.c +++ b/trunk/arch/i386/kernel/nmi.c @@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void) if (nmi_watchdog == NMI_LOCAL_APIC) smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); - for_each_cpu(cpu) + for_each_possible_cpu(cpu) prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for_each_cpu(cpu) { + for_each_possible_cpu(cpu) { #ifdef CONFIG_SMP /* Check cpu_callin_map here because that is set after the timer is started. */ @@ -510,7 +510,7 @@ void touch_nmi_watchdog (void) * Just reset the alert counters, (other CPUs might be * spinning on locks we hold): */ - for_each_cpu(i) + for_each_possible_cpu(i) alert_counter[i] = 0; /* @@ -529,7 +529,8 @@ void nmi_watchdog_tick (struct pt_regs * regs) * always switch the stack NMI-atomically, it's safe to use * smp_processor_id(). 
*/ - int sum, cpu = smp_processor_id(); + unsigned int sum; + int cpu = smp_processor_id(); sum = per_cpu(irq_stat, cpu).apic_timer_irqs; diff --git a/trunk/arch/i386/kernel/process.c b/trunk/arch/i386/kernel/process.c index 299e61674084..24b3e745478b 100644 --- a/trunk/arch/i386/kernel/process.c +++ b/trunk/arch/i386/kernel/process.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include @@ -364,13 +363,6 @@ void exit_thread(void) struct task_struct *tsk = current; struct thread_struct *t = &tsk->thread; - /* - * Remove function-return probe instances associated with this task - * and put them back on the free list. Do not insert an exit probe for - * this function, it will be disabled by kprobe_flush_task if you do. - */ - kprobe_flush_task(tsk); - /* The process may have allocated an io port bitmap... nuke it. */ if (unlikely(NULL != t->io_bitmap_ptr)) { int cpu = get_cpu(); diff --git a/trunk/arch/i386/kernel/setup.c b/trunk/arch/i386/kernel/setup.c index d313a11acafa..8c08660b4e5d 100644 --- a/trunk/arch/i386/kernel/setup.c +++ b/trunk/arch/i386/kernel/setup.c @@ -46,6 +46,7 @@ #include #include #include +#include #include
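Taken together, the kprobes hunks above implement the "kprobe booster": arch_prepare_kprobe() marks instructions that can_boost() approves as boostable, and resume_execution() appends a relative jump (set_jmp_op()) back to the original instruction stream, so later hits on such a probe execute the copied instruction directly instead of taking the single-step trap. Nothing changes for users; a probe with only a pre_handler (no post_handler or break_handler) becomes eligible automatically. A minimal sketch of such a probe — address lookup is elided and the example_* names are invented:

        #include <linux/kprobes.h>
        #include <linux/module.h>

        /* With no post_handler or break_handler set, this probe is a
         * candidate for boosting on a non-preempted hit. */
        static int example_pre(struct kprobe *p, struct pt_regs *regs)
        {
                return 0;
        }

        static struct kprobe example_kp = {
                .pre_handler = example_pre,
                /* .addr must point at the probed instruction, e.g. looked
                 * up via kallsyms; left unset here for brevity. */
        };

        static int __init example_init(void)
        {
                return register_kprobe(&example_kp);
        }

The reworked kretprobe trampoline above serves the same goal: it saves and restores registers itself and calls trampoline_handler() directly, so kretprobes no longer need the trampoline to be probed via a breakpoint.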