diff --git a/[refs] b/[refs] index 8d7f6f629452..360e00e51848 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: bec706838ec2f9c8c2b99e88a1270d7cba159b06 +refs/heads/master: 0e2595cdfd7df9f1128f7185152601ae5417483b diff --git a/trunk/Documentation/ABI/testing/sysfs-devices-cache_disable b/trunk/Documentation/ABI/testing/sysfs-devices-cache_disable deleted file mode 100644 index 175bb4f70512..000000000000 --- a/trunk/Documentation/ABI/testing/sysfs-devices-cache_disable +++ /dev/null @@ -1,18 +0,0 @@ -What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X -Date: August 2008 -KernelVersion: 2.6.27 -Contact: mark.langsdorf@amd.com -Description: These files exist in every cpu's cache index directories. - There are currently 2 cache_disable_# files in each - directory. Reading from these files on a supported - processor will return that cache disable index value - for that processor and node. Writing to one of these - files will cause the specificed cache index to be disabled. - - Currently, only AMD Family 10h Processors support cache index - disable, and only for their L3 caches. See the BIOS and - Kernel Developer's Guide at - http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf - for formatting information and other details on the - cache index disable. -Users: joachim.deguara@amd.com diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 11648c13a729..fd5cac013037 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -1575,9 +1575,6 @@ and is between 256 and 4096 characters. It is defined in the file noinitrd [RAM] Tells the kernel not to load any configured initial RAM disk. - nointremap [X86-64, Intel-IOMMU] Do not enable interrupt - remapping. - nointroute [IA-64] nojitter [IA64] Disables jitter checking for ITC timers. diff --git a/trunk/Documentation/memory-barriers.txt b/trunk/Documentation/memory-barriers.txt index 7f5809eddee6..f5b7127f54ac 100644 --- a/trunk/Documentation/memory-barriers.txt +++ b/trunk/Documentation/memory-barriers.txt @@ -31,7 +31,6 @@ Contents: - Locking functions. - Interrupt disabling functions. - - Sleep and wake-up functions. - Miscellaneous functions. (*) Inter-CPU locking barrier effects. @@ -1218,132 +1217,6 @@ barriers are required in such a situation, they must be provided from some other means. -SLEEP AND WAKE-UP FUNCTIONS ---------------------------- - -Sleeping and waking on an event flagged in global data can be viewed as an -interaction between two pieces of data: the task state of the task waiting for -the event and the global data used to indicate the event. To make sure that -these appear to happen in the right order, the primitives to begin the process -of going to sleep, and the primitives to initiate a wake up imply certain -barriers. - -Firstly, the sleeper normally follows something like this sequence of events: - - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (event_indicated) - break; - schedule(); - } - -A general memory barrier is interpolated automatically by set_current_state() -after it has altered the task state: - - CPU 1 - =============================== - set_current_state(); - set_mb(); - STORE current->state - - LOAD event_indicated - -set_current_state() may be wrapped by: - - prepare_to_wait(); - prepare_to_wait_exclusive(); - -which therefore also imply a general memory barrier after setting the state. 
-The whole sequence above is available in various canned forms, all of which -interpolate the memory barrier in the right place: - - wait_event(); - wait_event_interruptible(); - wait_event_interruptible_exclusive(); - wait_event_interruptible_timeout(); - wait_event_killable(); - wait_event_timeout(); - wait_on_bit(); - wait_on_bit_lock(); - - -Secondly, code that performs a wake up normally follows something like this: - - event_indicated = 1; - wake_up(&event_wait_queue); - -or: - - event_indicated = 1; - wake_up_process(event_daemon); - -A write memory barrier is implied by wake_up() and co. if and only if they wake -something up. The barrier occurs before the task state is cleared, and so sits -between the STORE to indicate the event and the STORE to set TASK_RUNNING: - - CPU 1 CPU 2 - =============================== =============================== - set_current_state(); STORE event_indicated - set_mb(); wake_up(); - STORE current->state - STORE current->state - LOAD event_indicated - -The available waker functions include: - - complete(); - wake_up(); - wake_up_all(); - wake_up_bit(); - wake_up_interruptible(); - wake_up_interruptible_all(); - wake_up_interruptible_nr(); - wake_up_interruptible_poll(); - wake_up_interruptible_sync(); - wake_up_interruptible_sync_poll(); - wake_up_locked(); - wake_up_locked_poll(); - wake_up_nr(); - wake_up_poll(); - wake_up_process(); - - -[!] Note that the memory barriers implied by the sleeper and the waker do _not_ -order multiple stores before the wake-up with respect to loads of those stored -values after the sleeper has called set_current_state(). For instance, if the -sleeper does: - - set_current_state(TASK_INTERRUPTIBLE); - if (event_indicated) - break; - __set_current_state(TASK_RUNNING); - do_something(my_data); - -and the waker does: - - my_data = value; - event_indicated = 1; - wake_up(&event_wait_queue); - -there's no guarantee that the change to event_indicated will be perceived by -the sleeper as coming after the change to my_data. In such a circumstance, the -code on both sides must interpolate its own memory barriers between the -separate data accesses. Thus the above sleeper ought to do: - - set_current_state(TASK_INTERRUPTIBLE); - if (event_indicated) { - smp_rmb(); - do_something(my_data); - } - -and the waker should do: - - my_data = value; - smp_wmb(); - event_indicated = 1; - wake_up(&event_wait_queue); - - MISCELLANEOUS FUNCTIONS ----------------------- @@ -1493,7 +1366,7 @@ WHERE ARE MEMORY BARRIERS NEEDED? Under normal operation, memory operation reordering is generally not going to be a problem as a single-threaded linear piece of code will still appear to -work correctly, even if it's in an SMP kernel. There are, however, four +work correctly, even if it's in an SMP kernel. There are, however, three circumstances in which reordering definitely _could_ be a problem: (*) Interprocessor interaction. diff --git a/trunk/Documentation/scheduler/sched-rt-group.txt b/trunk/Documentation/scheduler/sched-rt-group.txt index 1df7f9cdab05..5ba4d3fc625a 100644 --- a/trunk/Documentation/scheduler/sched-rt-group.txt +++ b/trunk/Documentation/scheduler/sched-rt-group.txt @@ -4,7 +4,6 @@ CONTENTS ======== -0. WARNING 1. Overview 1.1 The problem 1.2 The solution @@ -15,23 +14,6 @@ CONTENTS 3. Future plans -0. WARNING -========== - - Fiddling with these settings can result in an unstable system, the knobs are - root only and assumes root knows what he is doing. 
- -Most notable: - - * very small values in sched_rt_period_us can result in an unstable - system when the period is smaller than either the available hrtimer - resolution, or the time it takes to handle the budget refresh itself. - - * very small values in sched_rt_runtime_us can result in an unstable - system when the runtime is so small the system has difficulty making - forward progress (NOTE: the migration thread and kstopmachine both - are real-time processes). - 1. Overview =========== @@ -187,7 +169,7 @@ get their allocated time. Implementing SCHED_EDF might take a while to complete. Priority Inheritance is the biggest challenge as the current linux PI infrastructure is geared towards -the limited static priority levels 0-99. With deadline scheduling you need to +the limited static priority levels 0-139. With deadline scheduling you need to do deadline inheritance (since priority is inversely proportional to the deadline delta (deadline - now). diff --git a/trunk/Documentation/trace/ftrace.txt b/trunk/Documentation/trace/ftrace.txt index e362f50c496f..fd9a3e693813 100644 --- a/trunk/Documentation/trace/ftrace.txt +++ b/trunk/Documentation/trace/ftrace.txt @@ -518,18 +518,9 @@ priority with zero (0) being the highest priority and the nice values starting at 100 (nice -20). Below is a quick chart to map the kernel priority to user land priorities. - Kernel Space User Space - =============================================================== - 0(high) to 98(low) user RT priority 99(high) to 1(low) - with SCHED_RR or SCHED_FIFO - --------------------------------------------------------------- - 99 sched_priority is not used in scheduling - decisions(it must be specified as 0) - --------------------------------------------------------------- - 100(high) to 139(low) user nice -20(high) to 19(low) - --------------------------------------------------------------- - 140 idle task priority - --------------------------------------------------------------- + Kernel priority: 0 to 99 ==> user RT priority 99 to 0 + Kernel priority: 100 to 139 ==> user nice -20 to 19 + Kernel priority: 140 ==> idle task priority The task states are: diff --git a/trunk/Documentation/x86/boot.txt b/trunk/Documentation/x86/boot.txt index 8da3a795083f..e0203662f9e9 100644 --- a/trunk/Documentation/x86/boot.txt +++ b/trunk/Documentation/x86/boot.txt @@ -50,10 +50,6 @@ Protocol 2.08: (Kernel 2.6.26) Added crc32 checksum and ELF format Protocol 2.09: (Kernel 2.6.26) Added a field of 64-bit physical pointer to single linked list of struct setup_data. -Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment - beyond the kernel_alignment added, new init_size and - pref_address fields. Added extended boot loader IDs. 
- **** MEMORY LAYOUT The traditional memory map for the kernel loader, used for Image or @@ -172,13 +168,12 @@ Offset Proto Name Meaning 021C/4 2.00+ ramdisk_size initrd size (set by boot loader) 0220/4 2.00+ bootsect_kludge DO NOT USE - for bootsect.S use only 0224/2 2.01+ heap_end_ptr Free memory after setup end -0226/1 2.02+(3 ext_loader_ver Extended boot loader version -0227/1 2.02+(3 ext_loader_type Extended boot loader ID +0226/2 N/A pad1 Unused 0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line 022C/4 2.03+ ramdisk_max Highest legal initrd address 0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel 0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not -0235/1 2.10+ min_alignment Minimum alignment, as a power of two +0235/1 N/A pad2 Unused 0236/2 N/A pad3 Unused 0238/4 2.06+ cmdline_size Maximum size of the kernel command line 023C/4 2.07+ hardware_subarch Hardware subarchitecture @@ -187,8 +182,6 @@ Offset Proto Name Meaning 024C/4 2.08+ payload_length Length of kernel payload 0250/8 2.09+ setup_data 64-bit physical pointer to linked list of struct setup_data -0258/8 2.10+ pref_address Preferred loading address -0260/4 2.10+ init_size Linear memory required during initialization (1) For backwards compatibility, if the setup_sects field contains 0, the real value is 4. @@ -197,8 +190,6 @@ Offset Proto Name Meaning field are unusable, which means the size of a bzImage kernel cannot be determined. -(3) Ignored, but safe to set, for boot protocols 2.02-2.09. - If the "HdrS" (0x53726448) magic number is not found at offset 0x202, the boot protocol version is "old". Loading an old kernel, the following parameters should be assumed: @@ -352,32 +343,18 @@ Protocol: 2.00+ 0xTV here, where T is an identifier for the boot loader and V is a version number. Otherwise, enter 0xFF here. - For boot loader IDs above T = 0xD, write T = 0xE to this field and - write the extended ID minus 0x10 to the ext_loader_type field. - Similarly, the ext_loader_ver field can be used to provide more than - four bits for the bootloader version. - - For example, for T = 0x15, V = 0x234, write: - - type_of_loader <- 0xE4 - ext_loader_type <- 0x05 - ext_loader_ver <- 0x23 - Assigned boot loader ids: 0 LILO (0x00 reserved for pre-2.00 bootloader) 1 Loadlin 2 bootsect-loader (0x20, all other values reserved) - 3 Syslinux - 4 Etherboot/gPXE + 3 SYSLINUX + 4 EtherBoot 5 ELILO 7 GRUB - 8 U-Boot + 8 U-BOOT 9 Xen A Gujin B Qemu - C Arcturus Networks uCbootloader - E Extended (see ext_loader_type) - F Special (0xFF = undefined) Please contact if you need a bootloader ID value assigned. @@ -476,35 +453,6 @@ Protocol: 2.01+ Set this field to the offset (from the beginning of the real-mode code) of the end of the setup stack/heap, minus 0x0200. -Field name: ext_loader_ver -Type: write (optional) -Offset/size: 0x226/1 -Protocol: 2.02+ - - This field is used as an extension of the version number in the - type_of_loader field. The total version number is considered to be - (type_of_loader & 0x0f) + (ext_loader_ver << 4). - - The use of this field is boot loader specific. If not written, it - is zero. - - Kernels prior to 2.6.31 did not recognize this field, but it is safe - to write for protocol version 2.02 or higher. - -Field name: ext_loader_type -Type: write (obligatory if (type_of_loader & 0xf0) == 0xe0) -Offset/size: 0x227/1 -Protocol: 2.02+ - - This field is used as an extension of the type number in - type_of_loader field. 
If the type in type_of_loader is 0xE, then - the actual type is (ext_loader_type + 0x10). - - This field is ignored if the type in type_of_loader is not 0xE. - - Kernels prior to 2.6.31 did not recognize this field, but it is safe - to write for protocol version 2.02 or higher. - Field name: cmd_line_ptr Type: write (obligatory) Offset/size: 0x228/4 @@ -534,19 +482,11 @@ Protocol: 2.03+ 0x37FFFFFF, you can start your ramdisk at 0x37FE0000.) Field name: kernel_alignment -Type: read/modify (reloc) +Type: read (reloc) Offset/size: 0x230/4 -Protocol: 2.05+ (read), 2.10+ (modify) - - Alignment unit required by the kernel (if relocatable_kernel is - true.) A relocatable kernel that is loaded at an alignment - incompatible with the value in this field will be realigned during - kernel initialization. +Protocol: 2.05+ - Starting with protocol version 2.10, this reflects the kernel - alignment preferred for optimal performance; it is possible for the - loader to modify this field to permit a lesser alignment. See the - min_alignment and pref_address field below. + Alignment unit required by the kernel (if relocatable_kernel is true.) Field name: relocatable_kernel Type: read (reloc) @@ -558,22 +498,6 @@ Protocol: 2.05+ After loading, the boot loader must set the code32_start field to point to the loaded code, or to a boot loader hook. -Field name: min_alignment -Type: read (reloc) -Offset/size: 0x235/1 -Protocol: 2.10+ - - This field, if nonzero, indicates as a power of two the minimum - alignment required, as opposed to preferred, by the kernel to boot. - If a boot loader makes use of this field, it should update the - kernel_alignment field with the alignment unit desired; typically: - - kernel_alignment = 1 << min_alignment - - There may be a considerable performance cost with an excessively - misaligned kernel. Therefore, a loader should typically try each - power-of-two alignment from kernel_alignment down to this alignment. - Field name: cmdline_size Type: read Offset/size: 0x238/4 @@ -658,36 +582,6 @@ Protocol: 2.09+ sure to consider the case where the linked list already contains entries. -Field name: pref_address -Type: read (reloc) -Offset/size: 0x258/8 -Protocol: 2.10+ - - This field, if nonzero, represents a preferred load address for the - kernel. A relocating bootloader should attempt to load at this - address if possible. - - A non-relocatable kernel will unconditionally move itself and to run - at this address. - -Field name: init_size -Type: read -Offset/size: 0x25c/4 - - This field indicates the amount of linear contiguous memory starting - at the kernel runtime start address that the kernel needs before it - is capable of examining its memory map. This is not the same thing - as the total amount of memory the kernel needs to boot, but it can - be used by a relocating boot loader to help select a safe load - address for the kernel. - - The kernel runtime start address is determined by the following algorithm: - - if (relocatable_kernel) - runtime_start = align_up(load_address, kernel_alignment) - else - runtime_start = pref_address - **** THE IMAGE CHECKSUM diff --git a/trunk/Documentation/x86/x86_64/boot-options.txt b/trunk/Documentation/x86/x86_64/boot-options.txt index 2db5893d6c97..34c13040a718 100644 --- a/trunk/Documentation/x86/x86_64/boot-options.txt +++ b/trunk/Documentation/x86/x86_64/boot-options.txt @@ -150,6 +150,11 @@ NUMA Otherwise, the remaining system RAM is allocated to an additional node. 
+ numa=hotadd=percent + Only allow hotadd memory to preallocate page structures upto + percent of already available memory. + numa=hotadd=0 will disable hotadd memory. + ACPI acpi=off Don't enable ACPI diff --git a/trunk/Documentation/x86/x86_64/mm.txt b/trunk/Documentation/x86/x86_64/mm.txt index d6498e3cd713..29b52b14d0b4 100644 --- a/trunk/Documentation/x86/x86_64/mm.txt +++ b/trunk/Documentation/x86/x86_64/mm.txt @@ -6,11 +6,10 @@ Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [48:63] sign extension ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole -ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory -ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole -ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space -ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole -ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) +ffff880000000000 - ffffc0ffffffffff (=57 TB) direct mapping of all phys. memory +ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole +ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space +ffffe20000000000 - ffffe2ffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space diff --git a/trunk/Makefile b/trunk/Makefile index 03373bb703ca..739fd34a72a2 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 30 -EXTRAVERSION = +EXTRAVERSION = -rc7 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* @@ -533,7 +533,7 @@ endif include $(srctree)/arch/$(SRCARCH)/Makefile -ifneq ($(CONFIG_FRAME_WARN),0) +ifneq (CONFIG_FRAME_WARN,0) KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) endif diff --git a/trunk/arch/alpha/kernel/sys_dp264.c b/trunk/arch/alpha/kernel/sys_dp264.c index 5bd5259324b7..9c9d1fd4155f 100644 --- a/trunk/arch/alpha/kernel/sys_dp264.c +++ b/trunk/arch/alpha/kernel/sys_dp264.c @@ -176,26 +176,22 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } } -static int +static void dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&dp264_irq_lock); cpu_set_irq_affinity(irq, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); - - return 0; } -static int +static void clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&dp264_irq_lock); cpu_set_irq_affinity(irq - 16, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); - - return 0; } static struct hw_interrupt_type dp264_irq_type = { diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c index 8dd239ebdb9e..27f840a4ad3d 100644 --- a/trunk/arch/alpha/kernel/sys_titan.c +++ b/trunk/arch/alpha/kernel/sys_titan.c @@ -157,15 +157,13 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } -static int +static void titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&titan_irq_lock); titan_cpu_set_irq_affinity(irq - 16, *affinity); titan_update_irq_hw(titan_cached_irq_mask); spin_unlock(&titan_irq_lock); - - return 0; } static void diff --git a/trunk/arch/arm/common/gic.c b/trunk/arch/arm/common/gic.c index 664c7b8b1ba8..3e1714c6523f 100644 --- a/trunk/arch/arm/common/gic.c +++ b/trunk/arch/arm/common/gic.c @@ -109,7 +109,7 @@ static 
void gic_unmask_irq(unsigned int irq) } #ifdef CONFIG_SMP -static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) +static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) { void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); unsigned int shift = (irq % 4) * 8; @@ -122,8 +122,6 @@ static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) val |= 1 << (cpu + shift); writel(val, reg); spin_unlock(&irq_controller_lock); - - return 0; } #endif diff --git a/trunk/arch/arm/include/asm/cache.h b/trunk/arch/arm/include/asm/cache.h index feaa75f0013e..cb7a9e97fd7e 100644 --- a/trunk/arch/arm/include/asm/cache.h +++ b/trunk/arch/arm/include/asm/cache.h @@ -7,20 +7,4 @@ #define L1_CACHE_SHIFT 5 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -/* - * Memory returned by kmalloc() may be used for DMA, so we must make - * sure that all such allocations are cache aligned. Otherwise, - * unrelated code may cause parts of the buffer to be read into the - * cache before the transfer is done, causing old data to be seen by - * the CPU. - */ -#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES - -/* - * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. - */ -#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) -#define ARCH_SLAB_MINALIGN 8 -#endif - #endif diff --git a/trunk/arch/arm/include/asm/page.h b/trunk/arch/arm/include/asm/page.h index 7b522770f29d..e6eb8a67b807 100644 --- a/trunk/arch/arm/include/asm/page.h +++ b/trunk/arch/arm/include/asm/page.h @@ -202,6 +202,13 @@ typedef struct page *pgtable_t; (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +/* + * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. 
+ */ +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +#define ARCH_SLAB_MINALIGN 8 +#endif + #include #endif diff --git a/trunk/arch/arm/mach-mx2/clock_imx21.c b/trunk/arch/arm/mach-mx2/clock_imx21.c index e4b08ca804ea..999d013e06e3 100644 --- a/trunk/arch/arm/mach-mx2/clock_imx21.c +++ b/trunk/arch/arm/mach-mx2/clock_imx21.c @@ -890,7 +890,7 @@ static struct clk clko_clk = { .con_id = n, \ .clk = &c, \ }, -static struct clk_lookup lookups[] = { +static struct clk_lookup lookups[] __initdata = { /* It's unlikely that any driver wants one of them directly: _REGISTER_CLOCK(NULL, "ckih", ckih_clk) _REGISTER_CLOCK(NULL, "ckil", ckil_clk) diff --git a/trunk/arch/arm/mach-mx2/clock_imx27.c b/trunk/arch/arm/mach-mx2/clock_imx27.c index 2c971442f3f2..3f7280c490f0 100644 --- a/trunk/arch/arm/mach-mx2/clock_imx27.c +++ b/trunk/arch/arm/mach-mx2/clock_imx27.c @@ -621,7 +621,7 @@ DEFINE_CLOCK1(csi_clk, 0, 0, 0, parent, &csi_clk1, &per4_clk); .clk = &c, \ }, -static struct clk_lookup lookups[] = { +static struct clk_lookup lookups[] __initdata = { _REGISTER_CLOCK("imx-uart.0", NULL, uart1_clk) _REGISTER_CLOCK("imx-uart.1", NULL, uart2_clk) _REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk) diff --git a/trunk/arch/arm/mach-mx3/clock-imx35.c b/trunk/arch/arm/mach-mx3/clock-imx35.c index 3c1e06f56dd6..53a112d4e04a 100644 --- a/trunk/arch/arm/mach-mx3/clock-imx35.c +++ b/trunk/arch/arm/mach-mx3/clock-imx35.c @@ -404,7 +404,7 @@ DEFINE_CLOCK(gpu2d_clk, 0, CCM_CGR3, 4, NULL, NULL); .clk = &c, \ }, -static struct clk_lookup lookups[] = { +static struct clk_lookup lookups[] __initdata = { _REGISTER_CLOCK(NULL, "asrc", asrc_clk) _REGISTER_CLOCK(NULL, "ata", ata_clk) _REGISTER_CLOCK(NULL, "audmux", audmux_clk) diff --git a/trunk/arch/arm/mach-mx3/clock.c b/trunk/arch/arm/mach-mx3/clock.c index a68fcf981edf..9957a11533a4 100644 --- a/trunk/arch/arm/mach-mx3/clock.c +++ b/trunk/arch/arm/mach-mx3/clock.c @@ -516,7 +516,7 @@ DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); .clk = &c, \ }, -static struct clk_lookup lookups[] = { +static struct clk_lookup lookups[] __initdata = { _REGISTER_CLOCK(NULL, "emi", emi_clk) _REGISTER_CLOCK(NULL, "cspi", cspi1_clk) _REGISTER_CLOCK(NULL, "cspi", cspi2_clk) diff --git a/trunk/arch/arm/mach-pxa/devices.c b/trunk/arch/arm/mach-pxa/devices.c index 29970f703f3c..d245e59c51b1 100644 --- a/trunk/arch/arm/mach-pxa/devices.c +++ b/trunk/arch/arm/mach-pxa/devices.c @@ -72,10 +72,7 @@ void __init pxa_set_mci_info(struct pxamci_platform_data *info) } -static struct pxa2xx_udc_mach_info pxa_udc_info = { - .gpio_pullup = -1, - .gpio_vbus = -1, -}; +static struct pxa2xx_udc_mach_info pxa_udc_info; void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info) { diff --git a/trunk/arch/arm/mach-pxa/imote2.c b/trunk/arch/arm/mach-pxa/imote2.c index 2b27336c29f1..2121309b2474 100644 --- a/trunk/arch/arm/mach-pxa/imote2.c +++ b/trunk/arch/arm/mach-pxa/imote2.c @@ -412,7 +412,7 @@ static struct platform_device imote2_flash_device = { */ static struct i2c_board_info __initdata imote2_i2c_board_info[] = { { /* UCAM sensor board */ - .type = "max1239", + .type = "max1238", .addr = 0x35, }, { /* ITS400 Sensor board only */ .type = "max1363", diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S index a08d9d2380d3..3397f1e64d76 100644 --- a/trunk/arch/arm/mm/proc-v7.S +++ b/trunk/arch/arm/mm/proc-v7.S @@ -184,37 +184,23 @@ __v7_setup: stmia r12, {r0-r5, r7, r9, r11, lr} bl v7_flush_dcache_all ldmia r12, {r0-r5, r7, r9, r11, lr} - - mrc p15, 0, r0, c0, c0, 0 @ read 
main ID register - and r10, r0, #0xff000000 @ ARM? - teq r10, #0x41000000 - bne 2f - and r5, r0, #0x00f00000 @ variant - and r6, r0, #0x0000000f @ revision - orr r0, r6, r5, lsr #20-4 @ combine variant and revision - #ifdef CONFIG_ARM_ERRATA_430973 - teq r5, #0x00100000 @ only present in r1p* - mrceq p15, 0, r10, c1, c0, 1 @ read aux control register - orreq r10, r10, #(1 << 6) @ set IBE to 1 - mcreq p15, 0, r10, c1, c0, 1 @ write aux control register + mrc p15, 0, r10, c1, c0, 1 @ read aux control register + orr r10, r10, #(1 << 6) @ set IBE to 1 + mcr p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_458693 - teq r0, #0x20 @ only present in r2p0 - mrceq p15, 0, r10, c1, c0, 1 @ read aux control register - orreq r10, r10, #(1 << 5) @ set L1NEON to 1 - orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 - mcreq p15, 0, r10, c1, c0, 1 @ write aux control register + mrc p15, 0, r10, c1, c0, 1 @ read aux control register + orr r10, r10, #(1 << 5) @ set L1NEON to 1 + orr r10, r10, #(1 << 9) @ set PLDNOP to 1 + mcr p15, 0, r10, c1, c0, 1 @ write aux control register #endif #ifdef CONFIG_ARM_ERRATA_460075 - teq r0, #0x20 @ only present in r2p0 - mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register - tsteq r10, #1 << 22 - orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit - mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register + mrc p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register + orr r10, r10, #(1 << 22) @ set the Write Allocate disable bit + mcr p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register #endif - -2: mov r10, #0 + mov r10, #0 #ifdef HARVARD_CACHE mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate #endif diff --git a/trunk/arch/cris/arch-v32/kernel/irq.c b/trunk/arch/cris/arch-v32/kernel/irq.c index d70b445f4a8f..df3925cb1c7f 100644 --- a/trunk/arch/cris/arch-v32/kernel/irq.c +++ b/trunk/arch/cris/arch-v32/kernel/irq.c @@ -325,14 +325,12 @@ static void end_crisv32_irq(unsigned int irq) { } -int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest) +void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); irq_allocations[irq - FIRST_IRQ].mask = *dest; spin_unlock_irqrestore(&irq_lock, flags); - - return 0; } static struct irq_chip crisv32_irq_type = { diff --git a/trunk/arch/ia64/hp/sim/hpsim_irq.c b/trunk/arch/ia64/hp/sim/hpsim_irq.c index acb5047ab573..cc0a3182db3c 100644 --- a/trunk/arch/ia64/hp/sim/hpsim_irq.c +++ b/trunk/arch/ia64/hp/sim/hpsim_irq.c @@ -21,10 +21,9 @@ hpsim_irq_noop (unsigned int irq) { } -static int +static void hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) { - return 0; } static struct hw_interrupt_type irq_type_hp_sim = { diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c index baec6f00f7f3..5510317db37b 100644 --- a/trunk/arch/ia64/kernel/acpi.c +++ b/trunk/arch/ia64/kernel/acpi.c @@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void) * success: return IRQ number (>=0) * failure: return < 0 */ -int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) +int acpi_register_gsi(u32 gsi, int triggering, int polarity) { if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) return gsi; @@ -678,8 +678,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table) fadt = (struct acpi_table_fadt *)fadt_header; - acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, - ACPI_ACTIVE_LOW); + 
acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); return 0; } diff --git a/trunk/arch/ia64/kernel/iosapic.c b/trunk/arch/ia64/kernel/iosapic.c index f92cef47bf86..166e0d839fa0 100644 --- a/trunk/arch/ia64/kernel/iosapic.c +++ b/trunk/arch/ia64/kernel/iosapic.c @@ -329,7 +329,7 @@ unmask_irq (unsigned int irq) } -static int +static void iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) { #ifdef CONFIG_SMP @@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) cpu = cpumask_first_and(cpu_online_mask, mask); if (cpu >= nr_cpu_ids) - return -1; + return; if (irq_prepare_move(irq, cpu)) - return -1; + return; dest = cpu_physical_id(cpu); if (!iosapic_intr_info[irq].count) - return -1; /* not an IOSAPIC interrupt */ + return; /* not an IOSAPIC interrupt */ set_irq_affinity_info(irq, dest, redir); @@ -376,9 +376,7 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32); iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32); } - #endif - return 0; } /* diff --git a/trunk/arch/ia64/kernel/msi_ia64.c b/trunk/arch/ia64/kernel/msi_ia64.c index 0f8ade9331ba..2b15e233f7fe 100644 --- a/trunk/arch/ia64/kernel/msi_ia64.c +++ b/trunk/arch/ia64/kernel/msi_ia64.c @@ -12,7 +12,7 @@ static struct irq_chip ia64_msi_chip; #ifdef CONFIG_SMP -static int ia64_set_msi_irq_affinity(unsigned int irq, +static void ia64_set_msi_irq_affinity(unsigned int irq, const cpumask_t *cpu_mask) { struct msi_msg msg; @@ -20,10 +20,10 @@ static int ia64_set_msi_irq_affinity(unsigned int irq, int cpu = first_cpu(*cpu_mask); if (!cpu_online(cpu)) - return -1; + return; if (irq_prepare_move(irq, cpu)) - return -1; + return; read_msi_msg(irq, &msg); @@ -39,8 +39,6 @@ static int ia64_set_msi_irq_affinity(unsigned int irq, write_msi_msg(irq, &msg); cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); - - return 0; } #endif /* CONFIG_SMP */ @@ -132,17 +130,17 @@ void arch_teardown_msi_irq(unsigned int irq) #ifdef CONFIG_DMAR #ifdef CONFIG_SMP -static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg = irq_cfg + irq; struct msi_msg msg; int cpu = cpumask_first(mask); if (!cpu_online(cpu)) - return -1; + return; if (irq_prepare_move(irq, cpu)) - return -1; + return; dmar_msi_read(irq, &msg); @@ -153,8 +151,6 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) dmar_msi_write(irq, &msg); cpumask_copy(irq_desc[irq].affinity, mask); - - return 0; } #endif /* CONFIG_SMP */ diff --git a/trunk/arch/ia64/sn/kernel/irq.c b/trunk/arch/ia64/sn/kernel/irq.c index 764f26abac05..66fd705e82c0 100644 --- a/trunk/arch/ia64/sn/kernel/irq.c +++ b/trunk/arch/ia64/sn/kernel/irq.c @@ -227,7 +227,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, return new_irq_info; } -static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) +static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) { struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; nasid_t nasid; @@ -239,8 +239,6 @@ static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, sn_irq_lh[irq], list) (void)sn_retarget_vector(sn_irq_info, nasid, slice); - - return 0; } #ifdef CONFIG_SMP diff --git a/trunk/arch/ia64/sn/kernel/msi_sn.c 
b/trunk/arch/ia64/sn/kernel/msi_sn.c index fbbfb9701201..81e428943d73 100644 --- a/trunk/arch/ia64/sn/kernel/msi_sn.c +++ b/trunk/arch/ia64/sn/kernel/msi_sn.c @@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) } #ifdef CONFIG_SMP -static int sn_set_msi_irq_affinity(unsigned int irq, +static void sn_set_msi_irq_affinity(unsigned int irq, const struct cpumask *cpu_mask) { struct msi_msg msg; @@ -168,7 +168,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq, cpu = cpumask_first(cpu_mask); sn_irq_info = sn_msi_info[irq].sn_irq_info; if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) - return -1; + return; /* * Release XIO resources for the old MSI PCI address @@ -189,7 +189,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq, new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); sn_msi_info[irq].sn_irq_info = new_irq_info; if (new_irq_info == NULL) - return -1; + return; /* * Map the xio address into bus space @@ -206,8 +206,6 @@ static int sn_set_msi_irq_affinity(unsigned int irq, write_msi_msg(irq, &msg); cpumask_copy(irq_desc[irq].affinity, cpu_mask); - - return 0; } #endif /* CONFIG_SMP */ diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index 25f3b0a11ca8..09b1287a92ce 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -72,7 +72,6 @@ config MIPS_COBALT select IRQ_CPU select IRQ_GT641XX select PCI_GT64XXX_PCI0 - select PCI select SYS_HAS_CPU_NEVADA select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_32BIT_KERNEL @@ -594,7 +593,7 @@ config WR_PPMC board, which is based on GT64120 bridge chip. config CAVIUM_OCTEON_SIMULATOR - bool "Cavium Networks Octeon Simulator" + bool "Support for the Cavium Networks Octeon Simulator" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT @@ -608,7 +607,7 @@ config CAVIUM_OCTEON_SIMULATOR hardware. 
config CAVIUM_OCTEON_REFERENCE_BOARD - bool "Cavium Networks Octeon reference board" + bool "Support for the Cavium Networks Octeon reference board" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT diff --git a/trunk/arch/mips/cavium-octeon/octeon-irq.c b/trunk/arch/mips/cavium-octeon/octeon-irq.c index d3a0c8154bec..1c19af8daa62 100644 --- a/trunk/arch/mips/cavium-octeon/octeon-irq.c +++ b/trunk/arch/mips/cavium-octeon/octeon-irq.c @@ -177,7 +177,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq) } #ifdef CONFIG_SMP -static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) +static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) { int cpu; int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ @@ -199,8 +199,6 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * */ cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); write_unlock(&octeon_irq_ciu0_rwlock); - - return 0; } #endif @@ -294,7 +292,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq) } #ifdef CONFIG_SMP -static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest) +static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest) { int cpu; int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ @@ -317,8 +315,6 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask * */ cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); write_unlock(&octeon_irq_ciu1_rwlock); - - return 0; } #endif diff --git a/trunk/arch/mips/include/asm/cpu-info.h b/trunk/arch/mips/include/asm/cpu-info.h index 126044308dec..744cd8fb107f 100644 --- a/trunk/arch/mips/include/asm/cpu-info.h +++ b/trunk/arch/mips/include/asm/cpu-info.h @@ -39,8 +39,8 @@ struct cache_desc { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned int udelay_val; - unsigned int asid_cache; + unsigned long udelay_val; + unsigned long asid_cache; /* * Capability and feature descriptor structure for MIPS CPU diff --git a/trunk/arch/mips/include/asm/delay.h b/trunk/arch/mips/include/asm/delay.h index a07e51b2be13..b0bccd2c4ed5 100644 --- a/trunk/arch/mips/include/asm/delay.h +++ b/trunk/arch/mips/include/asm/delay.h @@ -11,12 +11,94 @@ #ifndef _ASM_DELAY_H #define _ASM_DELAY_H -extern void __delay(unsigned int loops); -extern void __ndelay(unsigned int ns); -extern void __udelay(unsigned int us); +#include +#include -#define ndelay(ns) __udelay(ns) -#define udelay(us) __udelay(us) +#include +#include + +static inline void __delay(unsigned long loops) +{ + if (sizeof(long) == 4) + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " subu %0, 1 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops)); + else if (sizeof(long) == 8 && !DADDI_WAR) + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " dsubu %0, 1 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops)); + else if (sizeof(long) == 8 && DADDI_WAR) + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " dsubu %0, %2 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops), "r" (1)); +} + + +/* + * Division by multiplication: you don't have to worry about + * loss of precision. + * + * Use only for very small delays ( < 1 msec). Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. 
This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + */ + +static inline void __udelay(unsigned long usecs, unsigned long lpj) +{ + unsigned long hi, lo; + + /* + * The rates of 128 is rounded wrongly by the catchall case + * for 64-bit. Excessive precission? Probably ... + */ +#if defined(CONFIG_64BIT) && (HZ == 128) + usecs *= 0x0008637bd05af6c7UL; /* 2**64 / (1000000 / HZ) */ +#elif defined(CONFIG_64BIT) + usecs *= (0x8000000000000000UL / (500000 / HZ)); +#else /* 32-bit junk follows here */ + usecs *= (unsigned long) (((0x8000000000000000ULL / (500000 / HZ)) + + 0x80000000ULL) >> 32); +#endif + + if (sizeof(long) == 4) + __asm__("multu\t%2, %3" + : "=h" (usecs), "=l" (lo) + : "r" (usecs), "r" (lpj) + : GCC_REG_ACCUM); + else if (sizeof(long) == 8 && !R4000_WAR) + __asm__("dmultu\t%2, %3" + : "=h" (usecs), "=l" (lo) + : "r" (usecs), "r" (lpj) + : GCC_REG_ACCUM); + else if (sizeof(long) == 8 && R4000_WAR) + __asm__("dmultu\t%3, %4\n\tmfhi\t%0" + : "=r" (usecs), "=h" (hi), "=l" (lo) + : "r" (usecs), "r" (lpj) + : GCC_REG_ACCUM); + + __delay(usecs); +} + +#define __udelay_val cpu_data[raw_smp_processor_id()].udelay_val + +#define udelay(usecs) __udelay((usecs), __udelay_val) /* make sure "usecs *= ..." in udelay do not overflow. */ #if HZ >= 1000 diff --git a/trunk/arch/mips/include/asm/ioctl.h b/trunk/arch/mips/include/asm/ioctl.h index 916163401b2c..85067e248a83 100644 --- a/trunk/arch/mips/include/asm/ioctl.h +++ b/trunk/arch/mips/include/asm/ioctl.h @@ -60,16 +60,12 @@ ((nr) << _IOC_NRSHIFT) | \ ((size) << _IOC_SIZESHIFT)) -#ifdef __KERNEL__ /* provoke compile error for invalid uses of size argument */ extern unsigned int __invalid_size_argument_for_IOC; #define _IOC_TYPECHECK(t) \ ((sizeof(t) == sizeof(t[1]) && \ sizeof(t) < (1 << _IOC_SIZEBITS)) ? 
\ sizeof(t) : __invalid_size_argument_for_IOC) -#else -#define _IOC_TYPECHECK(t) (sizeof(t)) -#endif /* used to create numbers */ #define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0) diff --git a/trunk/arch/mips/include/asm/irq.h b/trunk/arch/mips/include/asm/irq.h index 4f1eed107b08..3214ade02d10 100644 --- a/trunk/arch/mips/include/asm/irq.h +++ b/trunk/arch/mips/include/asm/irq.h @@ -49,7 +49,7 @@ static inline void smtc_im_ack_irq(unsigned int irq) #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF #include -extern int plat_set_irq_affinity(unsigned int irq, +extern void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity); extern void smtc_forward_irq(unsigned int irq); diff --git a/trunk/arch/mips/kernel/irq-gic.c b/trunk/arch/mips/kernel/irq-gic.c index 3f43c2e3aa5a..87deb8f6c458 100644 --- a/trunk/arch/mips/kernel/irq-gic.c +++ b/trunk/arch/mips/kernel/irq-gic.c @@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq) static DEFINE_SPINLOCK(gic_lock); -static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) +static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) { cpumask_t tmp = CPU_MASK_NONE; unsigned long flags; @@ -166,7 +166,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_and(&tmp, cpumask, cpu_online_mask); if (cpus_empty(tmp)) - return -1; + return; /* Assumption : cpumask refers to a single CPU */ spin_lock_irqsave(&gic_lock, flags); @@ -190,7 +190,6 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_copy(irq_desc[irq].affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); - return 0; } #endif diff --git a/trunk/arch/mips/kernel/proc.c b/trunk/arch/mips/kernel/proc.c index e0a4ac18fa07..26760cad8b69 100644 --- a/trunk/arch/mips/kernel/proc.c +++ b/trunk/arch/mips/kernel/proc.c @@ -42,7 +42,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, fmt, __cpu_name[n], (version >> 4) & 0x0f, version & 0x0f, (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); - seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", + seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n", cpu_data[n].udelay_val / (500000/HZ), (cpu_data[n].udelay_val / (5000/HZ)) % 100); seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); diff --git a/trunk/arch/mips/lib/Makefile b/trunk/arch/mips/lib/Makefile index 2adead5a8a37..c13c7ad2cdae 100644 --- a/trunk/arch/mips/lib/Makefile +++ b/trunk/arch/mips/lib/Makefile @@ -2,8 +2,8 @@ # Makefile for MIPS-specific library files.. # -lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \ - strlen_user.o strncpy_user.o strnlen_user.o uncached.o +lib-y += csum_partial.o memcpy.o memcpy-inatomic.o memset.o strlen_user.o \ + strncpy_user.o strnlen_user.o uncached.o obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o diff --git a/trunk/arch/mips/lib/delay.c b/trunk/arch/mips/lib/delay.c deleted file mode 100644 index f69c6b569eb3..000000000000 --- a/trunk/arch/mips/lib/delay.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1994 by Waldorf Electronics - * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle - * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2007 Maciej W. 
Rozycki - */ -#include -#include -#include - -#include -#include - -inline void __delay(unsigned int loops) -{ - __asm__ __volatile__ ( - " .set noreorder \n" - " .align 3 \n" - "1: bnez %0, 1b \n" - " subu %0, 1 \n" - " .set reorder \n" - : "=r" (loops) - : "0" (loops)); -} -EXPORT_SYMBOL(__delay); - -/* - * Division by multiplication: you don't have to worry about - * loss of precision. - * - * Use only for very small delays ( < 1 msec). Should probably use a - * lookup table, really, as the multiplications take much too long with - * short delays. This is a "reasonable" implementation, though (and the - * first constant multiplications gets optimized away if the delay is - * a constant) - */ - -void __udelay(unsigned long us) -{ - unsigned int lpj = current_cpu_data.udelay_val; - - __delay((us * 0x000010c7 * HZ * lpj) >> 32); -} -EXPORT_SYMBOL(__udelay); - -void __ndelay(unsigned long ns) -{ - unsigned int lpj = current_cpu_data.udelay_val; - - __delay((us * 0x00000005 * HZ * lpj) >> 32); -} -EXPORT_SYMBOL(__ndelay); diff --git a/trunk/arch/mips/mti-malta/malta-smtc.c b/trunk/arch/mips/mti-malta/malta-smtc.c index 499ffe5475df..5ba31888fefb 100644 --- a/trunk/arch/mips/mti-malta/malta-smtc.c +++ b/trunk/arch/mips/mti-malta/malta-smtc.c @@ -114,7 +114,7 @@ struct plat_smp_ops msmtc_smp_ops = { */ -int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) +void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) { cpumask_t tmask; int cpu = 0; @@ -156,7 +156,5 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) /* Do any generic SMTC IRQ affinity setup */ smtc_set_irq_affinity(irq, tmask); - - return 0; } #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ diff --git a/trunk/arch/mips/sibyte/bcm1480/irq.c b/trunk/arch/mips/sibyte/bcm1480/irq.c index 690de06bde90..c147c4b35d3f 100644 --- a/trunk/arch/mips/sibyte/bcm1480/irq.c +++ b/trunk/arch/mips/sibyte/bcm1480/irq.c @@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq); static void disable_bcm1480_irq(unsigned int irq); static void ack_bcm1480_irq(unsigned int irq); #ifdef CONFIG_SMP -static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); +static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); #endif #ifdef CONFIG_PCI @@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq) } #ifdef CONFIG_SMP -static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) +static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on, k; u64 cur_ints; @@ -118,7 +118,7 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) if (cpumask_weight(mask) != 1) { printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); - return -1; + return; } i = cpumask_first(mask); @@ -152,8 +152,6 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) } } spin_unlock_irqrestore(&bcm1480_imr_lock, flags); - - return 0; } #endif diff --git a/trunk/arch/mips/sibyte/cfe/setup.c b/trunk/arch/mips/sibyte/cfe/setup.c index eb5396cf81bb..3de30f79db3f 100644 --- a/trunk/arch/mips/sibyte/cfe/setup.c +++ b/trunk/arch/mips/sibyte/cfe/setup.c @@ -288,7 +288,13 @@ void __init prom_init(void) */ cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE); if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, CL_SIZE) < 0) { - if (argc >= 0) { + if (argc < 0) { + /* + * It's OK for direct boot to not provide a + * command line + */ + 
strcpy(arcs_cmdline, "root=/dev/ram0 "); + } else { /* The loader should have set the command line */ /* too early for panic to do any good */ printk("LINUX_CMDLINE not defined in cfe."); diff --git a/trunk/arch/mips/sibyte/sb1250/irq.c b/trunk/arch/mips/sibyte/sb1250/irq.c index 409dec798863..38cb998ade22 100644 --- a/trunk/arch/mips/sibyte/sb1250/irq.c +++ b/trunk/arch/mips/sibyte/sb1250/irq.c @@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq); static void disable_sb1250_irq(unsigned int irq); static void ack_sb1250_irq(unsigned int irq); #ifdef CONFIG_SMP -static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); +static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); #endif #ifdef CONFIG_SIBYTE_HAS_LDT @@ -103,7 +103,7 @@ void sb1250_unmask_irq(int cpu, int irq) } #ifdef CONFIG_SMP -static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) +static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on; u64 cur_ints; @@ -113,7 +113,7 @@ static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) if (cpumask_weight(mask) > 1) { printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); - return -1; + return; } /* Convert logical CPU to physical CPU */ @@ -143,8 +143,6 @@ static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) R_IMR_INTERRUPT_MASK)); } spin_unlock_irqrestore(&sb1250_imr_lock, flags); - - return 0; } #endif diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c index 8007f1e65729..4ea4229d765c 100644 --- a/trunk/arch/parisc/kernel/irq.c +++ b/trunk/arch/parisc/kernel/irq.c @@ -130,17 +130,15 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) return cpu_dest; } -static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) +static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) { int cpu_dest; cpu_dest = cpu_check_affinity(irq, dest); if (cpu_dest < 0) - return -1; + return; cpumask_copy(&irq_desc[irq].affinity, dest); - - return 0; } #endif diff --git a/trunk/arch/powerpc/configs/pmac32_defconfig b/trunk/arch/powerpc/configs/pmac32_defconfig index ea8870a34482..5339bb44cce9 100644 --- a/trunk/arch/powerpc/configs/pmac32_defconfig +++ b/trunk/arch/powerpc/configs/pmac32_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.30-rc7 -# Mon May 25 14:53:25 2009 +# Linux kernel version: 2.6.28-rc3 +# Tue Nov 11 19:36:51 2008 # # CONFIG_PPC64 is not set @@ -14,7 +14,6 @@ CONFIG_6xx=y # CONFIG_40x is not set # CONFIG_44x is not set # CONFIG_E200 is not set -CONFIG_PPC_BOOK3S=y CONFIG_PPC_FPU=y CONFIG_ALTIVEC=y CONFIG_PPC_STD_MMU=y @@ -44,7 +43,7 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_PPC=y CONFIG_EARLY_PRINTK=y CONFIG_GENERIC_NVRAM=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_PPC_OF=y CONFIG_OF=y @@ -53,14 +52,12 @@ CONFIG_OF=y CONFIG_AUDIT_ARCH=y CONFIG_GENERIC_BUG=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y -CONFIG_DTC=y # CONFIG_DEFAULT_UIMAGE is not set CONFIG_HIBERNATE_32=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y # CONFIG_PPC_DCR_NATIVE is not set # CONFIG_PPC_DCR_MMIO is not set -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" # @@ -75,24 +72,14 @@ CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y 
CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set - -# -# RCU Subsystem -# -CONFIG_CLASSIC_RCU=y -# CONFIG_TREE_RCU is not set -# CONFIG_PREEMPT_RCU is not set -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_PREEMPT_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_GROUP_SCHED is not set # CONFIG_CGROUPS is not set +# CONFIG_GROUP_SCHED is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set @@ -101,27 +88,23 @@ CONFIG_NAMESPACES=y # CONFIG_IPC_NS is not set # CONFIG_USER_NS is not set # CONFIG_PID_NS is not set -# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y # CONFIG_EMBEDDED is not set CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y # CONFIG_KALLSYMS_EXTRA_PASS is not set -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y +# CONFIG_COMPAT_BRK is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y +CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y @@ -131,12 +114,10 @@ CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y # CONFIG_MARKERS is not set CONFIG_OPROFILE=y CONFIG_HAVE_OPROFILE=y @@ -146,10 +127,10 @@ CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y -# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y +# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -157,8 +138,11 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y CONFIG_BLOCK=y CONFIG_LBD=y +# CONFIG_BLK_DEV_IO_TRACE is not set +CONFIG_LSF=y CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_INTEGRITY is not set @@ -174,11 +158,14 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" +CONFIG_CLASSIC_RCU=y CONFIG_FREEZER=y # # Platform support # +CONFIG_PPC_MULTIPLATFORM=y +CONFIG_CLASSIC32=y # CONFIG_PPC_CHRP is not set # CONFIG_MPC5121_ADS is not set # CONFIG_MPC5121_GENERIC is not set @@ -191,9 +178,7 @@ CONFIG_PPC_PMAC=y # CONFIG_PPC_83xx is not set # CONFIG_PPC_86xx is not set # CONFIG_EMBEDDED6xx is not set -# CONFIG_AMIGAONE is not set CONFIG_PPC_NATIVE=y -CONFIG_PPC_OF_BOOT_TRAMPOLINE=y # CONFIG_IPIC is not set CONFIG_MPIC=y # CONFIG_MPIC_WEIRD is not set @@ -227,12 +212,11 @@ CONFIG_CPU_FREQ_PMAC=y CONFIG_PPC601_SYNC_FIX=y # CONFIG_TAU is not set # CONFIG_FSL_ULI1575 is not set -# CONFIG_SIMPLE_GPIO is not set # # Kernel options # -CONFIG_HIGHMEM=y +# CONFIG_HIGHMEM is not set CONFIG_TICK_ONESHOT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y @@ -255,7 +239,6 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_ARCH_HAS_WALK_MEMORY=y CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y # CONFIG_KEXEC is not set -# CONFIG_CRASH_DUMP is not set CONFIG_ARCH_FLATMEM_ENABLE=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_SELECT_MEMORY_MODEL=y @@ -267,17 +250,12 @@ CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_MIGRATION is 
not set +# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y CONFIG_UNEVICTABLE_LRU=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y -CONFIG_PPC_4K_PAGES=y -# CONFIG_PPC_16K_PAGES is not set -# CONFIG_PPC_64K_PAGES is not set -# CONFIG_PPC_256K_PAGES is not set CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_PROC_DEVICETREE=y # CONFIG_CMDLINE_BOOL is not set @@ -310,8 +288,6 @@ CONFIG_ARCH_SUPPORTS_MSI=y # CONFIG_PCI_MSI is not set # CONFIG_PCI_LEGACY is not set # CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_STUB is not set -# CONFIG_PCI_IOV is not set CONFIG_PCCARD=m # CONFIG_PCMCIA_DEBUG is not set CONFIG_PCMCIA=m @@ -421,8 +397,6 @@ CONFIG_NETFILTER_XTABLES=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m # CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set # CONFIG_NETFILTER_XT_TARGET_DSCP is not set -CONFIG_NETFILTER_XT_TARGET_HL=m -# CONFIG_NETFILTER_XT_TARGET_LED is not set CONFIG_NETFILTER_XT_TARGET_MARK=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m @@ -431,7 +405,6 @@ CONFIG_NETFILTER_XT_TARGET_RATEEST=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set CONFIG_NETFILTER_XT_MATCH_COMMENT=m # CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m @@ -442,7 +415,6 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m # CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m @@ -506,15 +478,17 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_IP_DCCP=m CONFIG_INET_DCCP_DIAG=m +CONFIG_IP_DCCP_ACKVEC=y # # DCCP CCIDs Configuration (EXPERIMENTAL) # +CONFIG_IP_DCCP_CCID2=m # CONFIG_IP_DCCP_CCID2_DEBUG is not set -CONFIG_IP_DCCP_CCID3=y +CONFIG_IP_DCCP_CCID3=m # CONFIG_IP_DCCP_CCID3_DEBUG is not set CONFIG_IP_DCCP_CCID3_RTO=100 -CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_LIB=m # # DCCP Kernel Hacking @@ -534,16 +508,13 @@ CONFIG_IP_DCCP_TFRC_LIB=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set # CONFIG_NET_SCHED is not set CONFIG_NET_CLS_ROUTE=y -# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set # CONFIG_HAMRADIO is not set # CONFIG_CAN is not set CONFIG_IRDA=m @@ -606,6 +577,8 @@ CONFIG_BT_HIDP=m # # Bluetooth device drivers # +CONFIG_BT_HCIUSB=m +# CONFIG_BT_HCIUSB_SCO is not set # CONFIG_BT_HCIBTUSB is not set # CONFIG_BT_HCIUART is not set CONFIG_BT_HCIBCM203X=m @@ -617,27 +590,31 @@ CONFIG_BT_HCIBFUSB=m # CONFIG_BT_HCIBTUART is not set # CONFIG_BT_HCIVHCI is not set # CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set CONFIG_WIRELESS=y CONFIG_CFG80211=m -# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_NL80211=y CONFIG_WIRELESS_OLD_REGULATORY=y CONFIG_WIRELESS_EXT=y CONFIG_WIRELESS_EXT_SYSFS=y -# CONFIG_LIB80211 is not set CONFIG_MAC80211=m # # Rate control algorithm selection # -CONFIG_MAC80211_RC_MINSTREL=y -# CONFIG_MAC80211_RC_DEFAULT_PID is not set -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel" +CONFIG_MAC80211_RC_PID=y +# CONFIG_MAC80211_RC_MINSTREL is not set +CONFIG_MAC80211_RC_DEFAULT_PID=y +# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set +CONFIG_MAC80211_RC_DEFAULT="pid" # CONFIG_MAC80211_MESH is not 
set CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set # CONFIG_MAC80211_DEBUG_MENU is not set -# CONFIG_WIMAX is not set +CONFIG_IEEE80211=m +# CONFIG_IEEE80211_DEBUG is not set +CONFIG_IEEE80211_CRYPT_WEP=m +CONFIG_IEEE80211_CRYPT_CCMP=m +CONFIG_IEEE80211_CRYPT_TKIP=m # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -685,27 +662,17 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y # CONFIG_PHANTOM is not set +# CONFIG_EEPROM_93CX6 is not set # CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_HP_ILO is not set -# CONFIG_ISL29003 is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y CONFIG_IDE=y # # Please see Documentation/ide/ide.txt for help/info on IDE drives # -CONFIG_IDE_XFER_MODE=y CONFIG_IDE_TIMINGS=y CONFIG_IDE_ATAPI=y # CONFIG_BLK_DEV_IDE_SATA is not set @@ -717,6 +684,7 @@ CONFIG_BLK_DEV_IDECS=m CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y # CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_BLK_DEV_IDESCSI=y # CONFIG_IDE_TASK_IOCTL is not set CONFIG_IDE_PROC_FS=y @@ -746,7 +714,6 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y # CONFIG_BLK_DEV_JMICRON is not set # CONFIG_BLK_DEV_SC1200 is not set # CONFIG_BLK_DEV_PIIX is not set -# CONFIG_BLK_DEV_IT8172 is not set # CONFIG_BLK_DEV_IT8213 is not set # CONFIG_BLK_DEV_IT821X is not set # CONFIG_BLK_DEV_NS87415 is not set @@ -761,6 +728,7 @@ CONFIG_BLK_DEV_SL82C105=y # CONFIG_BLK_DEV_TC86C001 is not set CONFIG_BLK_DEV_IDE_PMAC=y CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y +CONFIG_BLK_DEV_IDEDMA_PMAC=y CONFIG_BLK_DEV_IDEDMA=y # @@ -804,7 +772,6 @@ CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set -# CONFIG_SCSI_CXGB3_ISCSI is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set # CONFIG_SCSI_3W_9XXX is not set # CONFIG_SCSI_ACARD is not set @@ -824,12 +791,8 @@ CONFIG_SCSI_AIC7XXX_OLD=m # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT2SAS is not set # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_LIBFC is not set -# CONFIG_LIBFCOE is not set -# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_EATA is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set @@ -859,7 +822,6 @@ CONFIG_SCSI_MAC53C94=y # CONFIG_SCSI_SRP is not set # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set # CONFIG_ATA is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=m @@ -919,7 +881,6 @@ CONFIG_THERM_ADT746X=m # CONFIG_ANSLCD is not set CONFIG_PMAC_RACKMETER=m CONFIG_NETDEVICES=y -CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_DUMMY=m # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -937,8 +898,6 @@ CONFIG_BMAC=y CONFIG_SUNGEM=y # CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set -# CONFIG_ETHOC is not set -# CONFIG_DNET is not set # CONFIG_NET_TULIP is not set # CONFIG_HP100 is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set @@ -954,6 +913,7 @@ CONFIG_PCNET32=y # CONFIG_ADAPTEC_STARFIRE is not set # CONFIG_B44 is not set # CONFIG_FORCEDETH is not set +# CONFIG_EEPRO100 is not set # CONFIG_E100 is not set # CONFIG_FEALNX is not set # CONFIG_NATSEMI is not set @@ -963,7 +923,6 @@ CONFIG_PCNET32=y # CONFIG_R6040 is not set # CONFIG_SIS900 is not set # CONFIG_EPIC100 is not set -# 
CONFIG_SMSC9420 is not set # CONFIG_SUNDANCE is not set # CONFIG_TLAN is not set # CONFIG_VIA_RHINE is not set @@ -976,7 +935,6 @@ CONFIG_NETDEV_1000=y # CONFIG_E1000E is not set # CONFIG_IP1000 is not set # CONFIG_IGB is not set -# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -987,20 +945,18 @@ CONFIG_NETDEV_1000=y # CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set +# CONFIG_MV643XX_ETH is not set # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set # CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set -CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_CHELSIO_T3 is not set # CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set -# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set @@ -1010,7 +966,6 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_BNX2X is not set # CONFIG_QLGE is not set # CONFIG_SFC is not set -# CONFIG_BE2NET is not set # CONFIG_TR is not set # @@ -1019,11 +974,20 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_WLAN_PRE80211 is not set CONFIG_WLAN_80211=y # CONFIG_PCMCIA_RAYCS is not set +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set # CONFIG_LIBERTAS is not set # CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_AIRO is not set +CONFIG_HERMES=m +CONFIG_APPLE_AIRPORT=m +# CONFIG_PLX_HERMES is not set +# CONFIG_TMD_HERMES is not set +# CONFIG_NORTEL_HERMES is not set +CONFIG_PCI_HERMES=m +CONFIG_PCMCIA_HERMES=m +# CONFIG_PCMCIA_SPECTRUM is not set # CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set # CONFIG_AIRO_CS is not set # CONFIG_PCMCIA_WL3501 is not set CONFIG_PRISM54=m @@ -1033,17 +997,15 @@ CONFIG_PRISM54=m # CONFIG_RTL8187 is not set # CONFIG_ADM8211 is not set # CONFIG_MAC80211_HWSIM is not set -# CONFIG_MWL8K is not set CONFIG_P54_COMMON=m # CONFIG_P54_USB is not set # CONFIG_P54_PCI is not set -CONFIG_P54_LEDS=y # CONFIG_ATH5K is not set # CONFIG_ATH9K is not set -# CONFIG_AR9170_USB is not set -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWLWIFI is not set +# CONFIG_IWLCORE is not set +# CONFIG_IWLWIFI_LEDS is not set +# CONFIG_IWLAGN is not set +# CONFIG_IWL3945 is not set # CONFIG_HOSTAP is not set CONFIG_B43=m CONFIG_B43_PCI_AUTOSELECT=y @@ -1063,19 +1025,6 @@ CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y # CONFIG_B43LEGACY_PIO_MODE is not set # CONFIG_ZD1211RW is not set # CONFIG_RT2X00 is not set -CONFIG_HERMES=m -CONFIG_HERMES_CACHE_FW_ON_INIT=y -CONFIG_APPLE_AIRPORT=m -# CONFIG_PLX_HERMES is not set -# CONFIG_TMD_HERMES is not set -# CONFIG_NORTEL_HERMES is not set -CONFIG_PCI_HERMES=m -CONFIG_PCMCIA_HERMES=m -# CONFIG_PCMCIA_SPECTRUM is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# # # USB Network Adapters @@ -1087,7 +1036,6 @@ CONFIG_PCMCIA_HERMES=m CONFIG_USB_USBNET=m CONFIG_USB_NET_AX8817X=m CONFIG_USB_NET_CDCETHER=m -# CONFIG_USB_NET_CDC_EEM is not set # CONFIG_USB_NET_DM9601 is not set # CONFIG_USB_NET_SMSC95XX is not set # CONFIG_USB_NET_GL620A is not set @@ -1151,7 +1099,7 @@ CONFIG_INPUT_KEYBOARD=y CONFIG_INPUT_MOUSE=y # CONFIG_MOUSE_PS2 is not set # CONFIG_MOUSE_SERIAL is not set -CONFIG_MOUSE_APPLETOUCH=y +# CONFIG_MOUSE_APPLETOUCH is not set # CONFIG_MOUSE_BCM5974 is not set # CONFIG_MOUSE_VSXXXAA is not set # CONFIG_INPUT_JOYSTICK is not set @@ -1202,13 +1150,10 @@ CONFIG_SERIAL_PMACZILOG_TTYS=y # CONFIG_SERIAL_JSM is not set # 
CONFIG_SERIAL_OF_PLATFORM is not set CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 -# CONFIG_HVC_UDBG is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=m -# CONFIG_HW_RANDOM_TIMERIOMEM is not set CONFIG_NVRAM=y CONFIG_GEN_RTC=y # CONFIG_GEN_RTC_X is not set @@ -1287,9 +1232,12 @@ CONFIG_I2C_POWERMAC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set @@ -1311,11 +1259,11 @@ CONFIG_BATTERY_PMU=y # CONFIG_THERMAL is not set # CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # +CONFIG_SSB_POSSIBLE=y CONFIG_SSB=m CONFIG_SSB_SPROM=y CONFIG_SSB_PCIHOST_POSSIBLE=y @@ -1333,13 +1281,18 @@ CONFIG_SSB_DRIVER_PCICORE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set -# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_PCF50633 is not set + +# +# Voltage and Current regulators +# # CONFIG_REGULATOR is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set # # Multimedia devices @@ -1437,7 +1390,6 @@ CONFIG_FB_ATY_BACKLIGHT=y # CONFIG_FB_KYRO is not set CONFIG_FB_3DFX=y # CONFIG_FB_3DFX_ACCEL is not set -CONFIG_FB_3DFX_I2C=y # CONFIG_FB_VOODOO1 is not set # CONFIG_FB_VT8623 is not set # CONFIG_FB_TRIDENT is not set @@ -1447,14 +1399,12 @@ CONFIG_FB_3DFX_I2C=y # CONFIG_FB_IBM_GXT4500 is not set # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=m # CONFIG_LCD_ILI9320 is not set # CONFIG_LCD_PLATFORM is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_CORGI is not set # # Display device support @@ -1494,13 +1444,11 @@ CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y CONFIG_SND_SEQUENCER_OSS=y -# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set -CONFIG_SND_VMASTER=y CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m # CONFIG_SND_VIRMIDI is not set @@ -1538,8 +1486,6 @@ CONFIG_SND_PCI=y # CONFIG_SND_INDIGO is not set # CONFIG_SND_INDIGOIO is not set # CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set # CONFIG_SND_EMU10K1 is not set # CONFIG_SND_EMU10K1X is not set # CONFIG_SND_ENS1370 is not set @@ -1605,31 +1551,28 @@ CONFIG_USB_HID=y # # Special HID drivers # +CONFIG_HID_COMPAT=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y +CONFIG_HID_BRIGHT=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -# CONFIG_DRAGONRISE_FF is not set +CONFIG_HID_DELL=y CONFIG_HID_EZKEY=y -CONFIG_HID_KYE=y CONFIG_HID_GYRATION=y -CONFIG_HID_KENSINGTON=y CONFIG_HID_LOGITECH=y # CONFIG_LOGITECH_FF is not set # CONFIG_LOGIRUMBLEPAD2_FF is not set CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y -CONFIG_HID_NTRIG=y 
CONFIG_HID_PANTHERLORD=y # CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_TOPSEED=y # CONFIG_THRUSTMASTER_FF is not set # CONFIG_ZEROPLUS_FF is not set CONFIG_USB_SUPPORT=y @@ -1660,7 +1603,6 @@ CONFIG_USB_EHCI_HCD=m CONFIG_USB_EHCI_ROOT_HUB_TT=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set # CONFIG_USB_EHCI_HCD_PPC_OF is not set -# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set CONFIG_USB_OHCI_HCD=y @@ -1683,23 +1625,24 @@ CONFIG_USB_PRINTER=m # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' # # -# also be needed; see USB_STORAGE Help for more info +# may also be needed; see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set # CONFIG_USB_STORAGE_ALAUDA is not set -CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_ONETOUCH=y # CONFIG_USB_STORAGE_KARMA is not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set # CONFIG_USB_LIBUSUAL is not set @@ -1722,7 +1665,7 @@ CONFIG_USB_EZUSB=y # CONFIG_USB_SERIAL_CH341 is not set # CONFIG_USB_SERIAL_WHITEHEAT is not set # CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set -# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CP2101 is not set # CONFIG_USB_SERIAL_CYPRESS_M8 is not set # CONFIG_USB_SERIAL_EMPEG is not set # CONFIG_USB_SERIAL_FTDI_SIO is not set @@ -1758,19 +1701,15 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y # CONFIG_USB_SERIAL_NAVMAN is not set # CONFIG_USB_SERIAL_PL2303 is not set # CONFIG_USB_SERIAL_OTI6858 is not set -# CONFIG_USB_SERIAL_QUALCOMM is not set # CONFIG_USB_SERIAL_SPCP8X5 is not set # CONFIG_USB_SERIAL_HP4X is not set # CONFIG_USB_SERIAL_SAFE is not set -# CONFIG_USB_SERIAL_SIEMENS_MPI is not set # CONFIG_USB_SERIAL_SIERRAWIRELESS is not set -# CONFIG_USB_SERIAL_SYMBOL is not set # CONFIG_USB_SERIAL_TI is not set # CONFIG_USB_SERIAL_CYBERJACK is not set # CONFIG_USB_SERIAL_XIRCOM is not set # CONFIG_USB_SERIAL_OPTION is not set # CONFIG_USB_SERIAL_OMNINET is not set -# CONFIG_USB_SERIAL_OPTICON is not set # CONFIG_USB_SERIAL_DEBUG is not set # @@ -1787,6 +1726,7 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set CONFIG_USB_APPLEDISPLAY=m @@ -1798,11 +1738,6 @@ CONFIG_USB_APPLEDISPLAY=m # CONFIG_USB_ISIGHTFW is not set # CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set - -# -# OTG and related infrastructure -# -# CONFIG_NOP_USB_XCEIV is not set # CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -1813,9 +1748,7 @@ CONFIG_LEDS_CLASS=y # LED drivers # # CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_LP5521 is not set # CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -1826,16 +1759,11 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=y - -# -# iptables trigger 
is under Netfilter config (LED target) -# # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set # CONFIG_EDAC is not set # CONFIG_RTC_CLASS is not set # CONFIG_DMADEVICES is not set -# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set @@ -1846,7 +1774,6 @@ CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y # CONFIG_EXT3_FS_SECURITY is not set @@ -1856,9 +1783,7 @@ CONFIG_EXT4_FS_XATTR=y # CONFIG_EXT4_FS_POSIX_ACL is not set # CONFIG_EXT4_FS_SECURITY is not set CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set @@ -1867,7 +1792,6 @@ CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1876,11 +1800,6 @@ CONFIG_INOTIFY_USER=y CONFIG_AUTOFS4_FS=m CONFIG_FUSE_FS=m -# -# Caches -# -# CONFIG_FSCACHE is not set - # # CD-ROM/DVD Filesystems # @@ -1912,7 +1831,10 @@ CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set -CONFIG_MISC_FILESYSTEMS=y + +# +# Miscellaneous filesystems +# # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set CONFIG_HFS_FS=m @@ -1921,7 +1843,6 @@ CONFIG_HFSPLUS_FS=m # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1930,7 +1851,6 @@ CONFIG_HFSPLUS_FS=m # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y @@ -1948,6 +1868,7 @@ CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -2019,13 +1940,11 @@ CONFIG_NLS_ISO8859_1=m # CONFIG_NLS_KOI8_U is not set CONFIG_NLS_UTF8=m # CONFIG_DLM is not set -CONFIG_BINARY_PRINTF=y # # Library routines # CONFIG_BITREVERSE=y -CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=y CONFIG_CRC16=y CONFIG_CRC_T10DIF=y @@ -2035,18 +1954,15 @@ CONFIG_CRC32=y CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y CONFIG_TEXTSEARCH=y CONFIG_TEXTSEARCH_KMP=m CONFIG_TEXTSEARCH_BM=m CONFIG_TEXTSEARCH_FSM=m +CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y CONFIG_HAVE_LMB=y -CONFIG_NLATTR=y # # Kernel hacking @@ -2057,16 +1973,13 @@ CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set -CONFIG_DEBUG_FS=y +# CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_DETECT_HUNG_TASK=y -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y CONFIG_SCHEDSTATS=y # CONFIG_TIMER_STATS is not set @@ -2081,7 +1994,6 @@ CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set -# 
CONFIG_DEBUG_HIGHMEM is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set @@ -2089,7 +2001,6 @@ CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set @@ -2098,14 +2009,7 @@ CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_FAULT_INJECTION is not set CONFIG_LATENCYTOP=y CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_RING_BUFFER=y -CONFIG_TRACING=y -CONFIG_TRACING_SUPPORT=y # # Tracers @@ -2113,19 +2017,12 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_FUNCTION_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_EVENT_TRACER is not set # CONFIG_BOOT_TRACER is not set -# CONFIG_TRACE_BRANCH_PROFILING is not set # CONFIG_STACK_TRACER is not set -# CONFIG_KMEMTRACE is not set -# CONFIG_WORKQUEUE_TRACER is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set -CONFIG_PRINT_STACK_DEPTH=64 # CONFIG_DEBUG_STACKOVERFLOW is not set # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_CODE_PATCHING_SELFTEST is not set @@ -2136,7 +2033,6 @@ CONFIG_XMON_DEFAULT=y CONFIG_XMON_DISASSEMBLY=y CONFIG_DEBUGGER=y CONFIG_IRQSTACKS=y -# CONFIG_VIRQ_DEBUG is not set # CONFIG_BDI_SWITCH is not set CONFIG_BOOTX_TEXT=y # CONFIG_PPC_EARLY_DEBUG is not set @@ -2155,20 +2051,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_FIPS is not set CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_RNG=y CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=y # CONFIG_CRYPTO_TEST is not set @@ -2238,7 +2127,6 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m # Compression # CONFIG_CRYPTO_DEFLATE=m -# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # diff --git a/trunk/arch/powerpc/platforms/pseries/xics.c b/trunk/arch/powerpc/platforms/pseries/xics.c index be3581a8c294..80b513449f4c 100644 --- a/trunk/arch/powerpc/platforms/pseries/xics.c +++ b/trunk/arch/powerpc/platforms/pseries/xics.c @@ -333,7 +333,7 @@ static void xics_eoi_lpar(unsigned int virq) lpar_xirr_info_set((0xff << 24) | irq); } -static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) +static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) { unsigned int irq; int status; @@ -342,14 +342,14 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) irq = (unsigned int)irq_map[virq].hwirq; if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) - return -1; + return; status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", __func__, irq, status); - return -1; + return; } /* @@ -363,7 +363,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) printk(KERN_WARNING "%s: No online cpus in the 
mask %s for irq %d\n", __func__, cpulist, virq); - return -1; + return; } status = rtas_call(ibm_set_xive, 3, 1, NULL, @@ -372,10 +372,8 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", __func__, irq, status); - return -1; + return; } - - return 0; } static struct irq_chip xics_pic_direct = { diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index 352d8c3ef526..0efc12d1a3d7 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -807,7 +807,7 @@ static void mpic_end_ipi(unsigned int irq) #endif /* CONFIG_SMP */ -int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) +void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); @@ -824,8 +824,6 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), mpic_physmask(cpus_addr(tmp)[0])); } - - return 0; } static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) diff --git a/trunk/arch/powerpc/sysdev/mpic.h b/trunk/arch/powerpc/sysdev/mpic.h index eff433c322a0..3cef2af10f42 100644 --- a/trunk/arch/powerpc/sysdev/mpic.h +++ b/trunk/arch/powerpc/sysdev/mpic.h @@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); extern void mpic_set_vector(unsigned int virq, unsigned int vector); -extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); +extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); #endif /* _POWERPC_SYSDEV_MPIC_H */ diff --git a/trunk/arch/sparc/kernel/irq_64.c b/trunk/arch/sparc/kernel/irq_64.c index e5e78f9cfc95..5deabe921a47 100644 --- a/trunk/arch/sparc/kernel/irq_64.c +++ b/trunk/arch/sparc/kernel/irq_64.c @@ -318,12 +318,10 @@ static void sun4u_irq_enable(unsigned int virt_irq) } } -static int sun4u_set_affinity(unsigned int virt_irq, +static void sun4u_set_affinity(unsigned int virt_irq, const struct cpumask *mask) { sun4u_irq_enable(virt_irq); - - return 0; } /* Don't do anything. 
The desc->status check for IRQ_DISABLED in @@ -379,7 +377,7 @@ static void sun4v_irq_enable(unsigned int virt_irq) ino, err); } -static int sun4v_set_affinity(unsigned int virt_irq, +static void sun4v_set_affinity(unsigned int virt_irq, const struct cpumask *mask) { unsigned int ino = virt_irq_table[virt_irq].dev_ino; @@ -390,8 +388,6 @@ static int sun4v_set_affinity(unsigned int virt_irq, if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " "err(%d)\n", ino, cpuid, err); - - return 0; } static void sun4v_irq_disable(unsigned int virt_irq) @@ -449,7 +445,7 @@ static void sun4v_virq_enable(unsigned int virt_irq) dev_handle, dev_ino, err); } -static int sun4v_virt_set_affinity(unsigned int virt_irq, +static void sun4v_virt_set_affinity(unsigned int virt_irq, const struct cpumask *mask) { unsigned long cpuid, dev_handle, dev_ino; @@ -465,8 +461,6 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq, printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " "err(%d)\n", dev_handle, dev_ino, cpuid, err); - - return 0; } static void sun4v_virq_disable(unsigned int virt_irq) diff --git a/trunk/arch/x86/Kbuild b/trunk/arch/x86/Kbuild deleted file mode 100644 index ad8ec356fb36..000000000000 --- a/trunk/arch/x86/Kbuild +++ /dev/null @@ -1,16 +0,0 @@ - -obj-$(CONFIG_KVM) += kvm/ - -# Xen paravirtualization support -obj-$(CONFIG_XEN) += xen/ - -# lguest paravirtualization support -obj-$(CONFIG_LGUEST_GUEST) += lguest/ - -obj-y += kernel/ -obj-y += mm/ - -obj-y += crypto/ -obj-y += vdso/ -obj-$(CONFIG_IA32_EMULATION) += ia32/ - diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index aafae3b140de..a6efe0a2e9ae 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -47,11 +47,6 @@ config X86 select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA -config OUTPUT_FORMAT - string - default "elf32-i386" if X86_32 - default "elf64-x86-64" if X86_64 - config ARCH_DEFCONFIG string default "arch/x86/configs/i386_defconfig" if X86_32 @@ -279,9 +274,15 @@ config SPARSE_IRQ If you don't know what to do here, say N. -config NUMA_IRQ_DESC - def_bool y +config NUMA_MIGRATE_IRQ_DESC + bool "Move irq desc when changing irq smp_affinity" depends on SPARSE_IRQ && NUMA + depends on BROKEN + default n + ---help--- + This enables moving irq_desc to cpu/node that irq will use handled. + + If you don't know what to do here, say N. config X86_MPPARSE bool "Enable MPS table" if ACPI @@ -354,7 +355,7 @@ config X86_UV depends on X86_64 depends on X86_EXTENDED_PLATFORM depends on NUMA - depends on X86_X2APIC + select X86_X2APIC ---help--- This option is needed in order to support SGI Ultraviolet systems. If you don't have one of these, you should say N here. @@ -1465,7 +1466,9 @@ config KEXEC_JUMP config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) - default "0x1000000" + default "0x1000000" if X86_NUMAQ + default "0x200000" if X86_64 + default "0x100000" ---help--- This gives the physical address where the kernel is loaded. @@ -1484,15 +1487,15 @@ config PHYSICAL_START to be specifically compiled to run from a specific memory area (normally a reserved region) and this option comes handy. - So if you are using bzImage for capturing the crash dump, - leave the value here unchanged to 0x1000000 and set - CONFIG_RELOCATABLE=y. Otherwise if you plan to use vmlinux - for capturing the crash dump change this value to start of - the reserved region. 
In other words, it can be set based on - the "X" value as specified in the "crashkernel=YM@XM" - command line boot parameter passed to the panic-ed - kernel. Please take a look at Documentation/kdump/kdump.txt - for more details about crash dumps. + So if you are using bzImage for capturing the crash dump, leave + the value here unchanged to 0x100000 and set CONFIG_RELOCATABLE=y. + Otherwise if you plan to use vmlinux for capturing the crash dump + change this value to start of the reserved region (Typically 16MB + 0x1000000). In other words, it can be set based on the "X" value as + specified in the "crashkernel=YM@XM" command line boot parameter + passed to the panic-ed kernel. Typically this parameter is set as + crashkernel=64M@16M. Please take a look at + Documentation/kdump/kdump.txt for more details about crash dumps. Usage of bzImage for capturing the crash dump is recommended as one does not have to build two kernels. Same kernel can be used @@ -1505,8 +1508,8 @@ config PHYSICAL_START Don't change this unless you know what you are doing. config RELOCATABLE - bool "Build a relocatable kernel" - default y + bool "Build a relocatable kernel (EXPERIMENTAL)" + depends on EXPERIMENTAL ---help--- This builds a kernel image that retains relocation information so it can be loaded someplace besides the default 1MB. @@ -1521,16 +1524,12 @@ config RELOCATABLE it has been loaded at and the compile time physical address (CONFIG_PHYSICAL_START) is ignored. -# Relocation on x86-32 needs some additional build support -config X86_NEED_RELOCS - def_bool y - depends on X86_32 && RELOCATABLE - config PHYSICAL_ALIGN hex prompt "Alignment value to which kernel should be aligned" if X86_32 - default "0x1000000" - range 0x2000 0x1000000 + default "0x100000" if X86_32 + default "0x200000" if X86_64 + range 0x2000 0x400000 ---help--- This value puts the alignment restrictions on physical address where kernel is loaded and run from. Kernel is compiled for an diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile index edbd0ca62067..8c86b72afdc2 100644 --- a/trunk/arch/x86/Makefile +++ b/trunk/arch/x86/Makefile @@ -7,6 +7,8 @@ else KBUILD_DEFCONFIG := $(ARCH)_defconfig endif +core-$(CONFIG_KVM) += arch/x86/kvm/ + # BITS is used as extension for files which are available in a 32 bit # and a 64 bit version to simplify shared Makefiles. 
# e.g.: obj-y += foo_$(BITS).o @@ -116,8 +118,21 @@ head-y += arch/x86/kernel/init_task.o libs-y += arch/x86/lib/ -# See arch/x86/Kbuild for content of core part of the kernel -core-y += arch/x86/ +# Sub architecture files that needs linking first +core-y += $(fcore-y) + +# Xen paravirtualization support +core-$(CONFIG_XEN) += arch/x86/xen/ + +# lguest paravirtualization support +core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/ + +core-y += arch/x86/kernel/ +core-y += arch/x86/mm/ + +core-y += arch/x86/crypto/ +core-y += arch/x86/vdso/ +core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/ # drivers-y are linked after core-y drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/ diff --git a/trunk/arch/x86/boot/.gitignore b/trunk/arch/x86/boot/.gitignore index 851fe936d242..172cf8a98bdd 100644 --- a/trunk/arch/x86/boot/.gitignore +++ b/trunk/arch/x86/boot/.gitignore @@ -3,8 +3,6 @@ bzImage cpustr.h mkcpustr offsets.h -voffset.h -zoffset.h setup setup.bin setup.elf diff --git a/trunk/arch/x86/boot/Makefile b/trunk/arch/x86/boot/Makefile index 8d16ada25048..6633b6e7505a 100644 --- a/trunk/arch/x86/boot/Makefile +++ b/trunk/arch/x86/boot/Makefile @@ -26,10 +26,9 @@ targets := vmlinux.bin setup.bin setup.elf bzImage targets += fdimage fdimage144 fdimage288 image.iso mtools.conf subdir- := compressed -setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o edd.o +setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o setup-y += header.o main.o mca.o memory.o pm.o pmjump.o -setup-y += printf.o regs.o string.o tty.o video.o video-mode.o -setup-y += version.o +setup-y += printf.o string.o tty.o video.o video-mode.o version.o setup-$(CONFIG_X86_APM_BOOT) += apm.o # The link order of the video-*.o modules can matter. In particular, @@ -87,27 +86,19 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) -sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p' +sed-offsets := -e 's/^00*/0/' \ + -e 's/^\([0-9a-fA-F]*\) . \(input_data\|input_data_end\)$$/\#define \2 0x\1/p' -quiet_cmd_voffset = VOFFSET $@ - cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@ +quiet_cmd_offsets = OFFSETS $@ + cmd_offsets = $(NM) $< | sed -n $(sed-offsets) > $@ -targets += voffset.h -$(obj)/voffset.h: vmlinux FORCE - $(call if_changed,voffset) - -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' - -quiet_cmd_zoffset = ZOFFSET $@ - cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ - -targets += zoffset.h -$(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE - $(call if_changed,zoffset) +$(obj)/offsets.h: $(obj)/compressed/vmlinux FORCE + $(call if_changed,offsets) +targets += offsets.h AFLAGS_header.o += -I$(obj) -$(obj)/header.o: $(obj)/voffset.h $(obj)/zoffset.h +$(obj)/header.o: $(obj)/offsets.h LDFLAGS_setup.elf := -T $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE diff --git a/trunk/arch/x86/boot/a20.c b/trunk/arch/x86/boot/a20.c index 64a31a6d751a..7c19ce8c2442 100644 --- a/trunk/arch/x86/boot/a20.c +++ b/trunk/arch/x86/boot/a20.c @@ -2,7 +2,7 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007-2008 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin + * Copyright 2009 Intel Corporation * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
@@ -90,11 +90,8 @@ static int a20_test_long(void) static void enable_a20_bios(void) { - struct biosregs ireg; - - initregs(&ireg); - ireg.ax = 0x2401; - intcall(0x15, &ireg, NULL); + asm volatile("pushfl; int $0x15; popfl" + : : "a" ((u16)0x2401)); } static void enable_a20_kbc(void) diff --git a/trunk/arch/x86/boot/apm.c b/trunk/arch/x86/boot/apm.c index ee274834ea8b..7aa6033001f9 100644 --- a/trunk/arch/x86/boot/apm.c +++ b/trunk/arch/x86/boot/apm.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * Original APM BIOS checking by Stephen Rothwell, May 1994 * (sfr@canb.auug.org.au) @@ -20,56 +19,75 @@ int query_apm_bios(void) { - struct biosregs ireg, oreg; + u16 ax, bx, cx, dx, di; + u32 ebx, esi; + u8 err; /* APM BIOS installation check */ - initregs(&ireg); - ireg.ah = 0x53; - intcall(0x15, &ireg, &oreg); + ax = 0x5300; + bx = cx = 0; + asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp ; setc %0" + : "=d" (err), "+a" (ax), "+b" (bx), "+c" (cx) + : : "esi", "edi"); - if (oreg.flags & X86_EFLAGS_CF) + if (err) return -1; /* No APM BIOS */ - if (oreg.bx != 0x504d) /* "PM" signature */ + if (bx != 0x504d) /* "PM" signature */ return -1; - if (!(oreg.cx & 0x02)) /* 32 bits supported? */ + if (!(cx & 0x02)) /* 32 bits supported? */ return -1; /* Disconnect first, just in case */ - ireg.al = 0x04; - intcall(0x15, &ireg, NULL); + ax = 0x5304; + bx = 0; + asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp" + : "+a" (ax), "+b" (bx) + : : "ecx", "edx", "esi", "edi"); + + /* Paranoia */ + ebx = esi = 0; + cx = dx = di = 0; /* 32-bit connect */ - ireg.al = 0x03; - intcall(0x15, &ireg, &oreg); - - boot_params.apm_bios_info.cseg = oreg.ax; - boot_params.apm_bios_info.offset = oreg.ebx; - boot_params.apm_bios_info.cseg_16 = oreg.cx; - boot_params.apm_bios_info.dseg = oreg.dx; - boot_params.apm_bios_info.cseg_len = oreg.si; - boot_params.apm_bios_info.cseg_16_len = oreg.hsi; - boot_params.apm_bios_info.dseg_len = oreg.di; - - if (oreg.flags & X86_EFLAGS_CF) + asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp ; setc %6" + : "=a" (ax), "+b" (ebx), "+c" (cx), "+d" (dx), + "+S" (esi), "+D" (di), "=m" (err) + : "a" (0x5303)); + + boot_params.apm_bios_info.cseg = ax; + boot_params.apm_bios_info.offset = ebx; + boot_params.apm_bios_info.cseg_16 = cx; + boot_params.apm_bios_info.dseg = dx; + boot_params.apm_bios_info.cseg_len = (u16)esi; + boot_params.apm_bios_info.cseg_16_len = esi >> 16; + boot_params.apm_bios_info.dseg_len = di; + + if (err) return -1; /* Redo the installation check as the 32-bit connect; some BIOSes return different flags this way... 
*/ - ireg.al = 0x00; - intcall(0x15, &ireg, &oreg); + ax = 0x5300; + bx = cx = 0; + asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp ; setc %0" + : "=d" (err), "+a" (ax), "+b" (bx), "+c" (cx) + : : "esi", "edi"); - if ((oreg.eflags & X86_EFLAGS_CF) || oreg.bx != 0x504d) { + if (err || bx != 0x504d) { /* Failure with 32-bit connect, try to disconect and ignore */ - ireg.al = 0x04; - intcall(0x15, &ireg, NULL); + ax = 0x5304; + bx = 0; + asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp" + : "+a" (ax), "+b" (bx) + : : "ecx", "edx", "esi", "edi"); return -1; } - boot_params.apm_bios_info.version = oreg.ax; - boot_params.apm_bios_info.flags = oreg.cx; + boot_params.apm_bios_info.version = ax; + boot_params.apm_bios_info.flags = cx; return 0; } diff --git a/trunk/arch/x86/boot/bioscall.S b/trunk/arch/x86/boot/bioscall.S deleted file mode 100644 index 507793739ea5..000000000000 --- a/trunk/arch/x86/boot/bioscall.S +++ /dev/null @@ -1,82 +0,0 @@ -/* ----------------------------------------------------------------------- - * - * Copyright 2009 Intel Corporation; author H. Peter Anvin - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2 or (at your - * option) any later version; incorporated herein by reference. - * - * ----------------------------------------------------------------------- */ - -/* - * "Glove box" for BIOS calls. Avoids the constant problems with BIOSes - * touching registers they shouldn't be. - */ - - .code16 - .text - .globl intcall - .type intcall, @function -intcall: - /* Self-modify the INT instruction. Ugly, but works. */ - cmpb %al, 3f - je 1f - movb %al, 3f - jmp 1f /* Synchronize pipeline */ -1: - /* Save state */ - pushfl - pushw %fs - pushw %gs - pushal - - /* Copy input state to stack frame */ - subw $44, %sp - movw %dx, %si - movw %sp, %di - movw $11, %cx - rep; movsd - - /* Pop full state from the stack */ - popal - popw %gs - popw %fs - popw %es - popw %ds - popfl - - /* Actual INT */ - .byte 0xcd /* INT opcode */ -3: .byte 0 - - /* Push full state to the stack */ - pushfl - pushw %ds - pushw %es - pushw %fs - pushw %gs - pushal - - /* Re-establish C environment invariants */ - cld - movzwl %sp, %esp - movw %cs, %ax - movw %ax, %ds - movw %ax, %es - - /* Copy output state from stack frame */ - movw 68(%esp), %di /* Original %cx == 3rd argument */ - andw %di, %di - jz 4f - movw %sp, %si - movw $11, %cx - rep; movsd -4: addw $44, %sp - - /* Restore state and return */ - popal - popw %gs - popw %fs - popfl - retl - .size intcall, .-intcall diff --git a/trunk/arch/x86/boot/boot.h b/trunk/arch/x86/boot/boot.h index 98239d2658f2..7b2692e897e5 100644 --- a/trunk/arch/x86/boot/boot.h +++ b/trunk/arch/x86/boot/boot.h @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
@@ -27,7 +26,6 @@ #include #include "bitops.h" #include -#include /* Useful macros */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) @@ -243,49 +241,6 @@ int enable_a20(void); /* apm.c */ int query_apm_bios(void); -/* bioscall.c */ -struct biosregs { - union { - struct { - u32 edi; - u32 esi; - u32 ebp; - u32 _esp; - u32 ebx; - u32 edx; - u32 ecx; - u32 eax; - u32 _fsgs; - u32 _dses; - u32 eflags; - }; - struct { - u16 di, hdi; - u16 si, hsi; - u16 bp, hbp; - u16 _sp, _hsp; - u16 bx, hbx; - u16 dx, hdx; - u16 cx, hcx; - u16 ax, hax; - u16 gs, fs; - u16 es, ds; - u16 flags, hflags; - }; - struct { - u8 dil, dih, edi2, edi3; - u8 sil, sih, esi2, esi3; - u8 bpl, bph, ebp2, ebp3; - u8 _spl, _sph, _esp2, _esp3; - u8 bl, bh, ebx2, ebx3; - u8 dl, dh, edx2, edx3; - u8 cl, ch, ecx2, ecx3; - u8 al, ah, eax2, eax3; - }; - }; -}; -void intcall(u8 int_no, const struct biosregs *ireg, struct biosregs *oreg); - /* cmdline.c */ int cmdline_find_option(const char *option, char *buffer, int bufsize); int cmdline_find_option_bool(const char *option); @@ -324,9 +279,6 @@ int sprintf(char *buf, const char *fmt, ...); int vsprintf(char *buf, const char *fmt, va_list args); int printf(const char *fmt, ...); -/* regs.c */ -void initregs(struct biosregs *regs); - /* string.c */ int strcmp(const char *str1, const char *str2); size_t strnlen(const char *s, size_t maxlen); diff --git a/trunk/arch/x86/boot/compressed/.gitignore b/trunk/arch/x86/boot/compressed/.gitignore index 4a46fab7162e..63eff3b04d01 100644 --- a/trunk/arch/x86/boot/compressed/.gitignore +++ b/trunk/arch/x86/boot/compressed/.gitignore @@ -1,6 +1,3 @@ relocs vmlinux.bin.all vmlinux.relocs -vmlinux.lds -mkpiggy -piggy.S diff --git a/trunk/arch/x86/boot/compressed/Makefile b/trunk/arch/x86/boot/compressed/Makefile index 49c8a4c37d7c..65551c9f8571 100644 --- a/trunk/arch/x86/boot/compressed/Makefile +++ b/trunk/arch/x86/boot/compressed/Makefile @@ -19,9 +19,7 @@ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS_vmlinux := -T -hostprogs-y := mkpiggy - -$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/piggy.o FORCE +$(obj)/vmlinux: $(src)/vmlinux_$(BITS).lds $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/piggy.o FORCE $(call if_changed,ld) @: @@ -31,7 +29,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE targets += vmlinux.bin.all vmlinux.relocs relocs -hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs +hostprogs-$(CONFIG_X86_32) += relocs quiet_cmd_relocs = RELOCS $@ cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $< @@ -39,22 +37,46 @@ $(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE $(call if_changed,relocs) vmlinux.bin.all-y := $(obj)/vmlinux.bin -vmlinux.bin.all-$(CONFIG_X86_NEED_RELOCS) += $(obj)/vmlinux.relocs +vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs +quiet_cmd_relocbin = BUILD $@ + cmd_relocbin = cat $(filter-out FORCE,$^) > $@ +$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE + $(call if_changed,relocbin) + +ifeq ($(CONFIG_X86_32),y) -$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE +ifdef CONFIG_RELOCATABLE +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE + $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE + $(call if_changed,lzma) +else +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE $(call 
if_changed,bzip2) -$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE $(call if_changed,lzma) +endif +LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T -suffix-$(CONFIG_KERNEL_GZIP) := gz -suffix-$(CONFIG_KERNEL_BZIP2) := bz2 -suffix-$(CONFIG_KERNEL_LZMA) := lzma +else -quiet_cmd_mkpiggy = MKPIGGY $@ - cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + +LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T +endif -targets += piggy.S -$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE - $(call if_changed,mkpiggy) +suffix_$(CONFIG_KERNEL_GZIP) = gz +suffix_$(CONFIG_KERNEL_BZIP2) = bz2 +suffix_$(CONFIG_KERNEL_LZMA) = lzma + +$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE + $(call if_changed,ld) diff --git a/trunk/arch/x86/boot/compressed/head_32.S b/trunk/arch/x86/boot/compressed/head_32.S index 75e4f001e706..3a8a866fb2e2 100644 --- a/trunk/arch/x86/boot/compressed/head_32.S +++ b/trunk/arch/x86/boot/compressed/head_32.S @@ -12,16 +12,16 @@ * the page directory. [According to comments etc elsewhere on a compressed * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC] * - * Page 0 is deliberately kept safe, since System Management Mode code in + * Page 0 is deliberately kept safe, since System Management Mode code in * laptops may need to access the BIOS data stored there. This is also - * useful for future device drivers that either access the BIOS via VM86 + * useful for future device drivers that either access the BIOS via VM86 * mode. */ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ - .text +.text #include #include @@ -29,151 +29,161 @@ #include #include - .section ".text.head","ax",@progbits +.section ".text.head","ax",@progbits ENTRY(startup_32) cld - /* - * Test KEEP_SEGMENTS flag to see if the bootloader is asking - * us to not reload segments - */ - testb $(1<<6), BP_loadflags(%esi) - jnz 1f + /* test KEEP_SEGMENTS flag to see if the bootloader is asking + * us to not reload segments */ + testb $(1<<6), BP_loadflags(%esi) + jnz 1f cli - movl $__BOOT_DS, %eax - movl %eax, %ds - movl %eax, %es - movl %eax, %fs - movl %eax, %gs - movl %eax, %ss + movl $(__BOOT_DS),%eax + movl %eax,%ds + movl %eax,%es + movl %eax,%fs + movl %eax,%gs + movl %eax,%ss 1: -/* - * Calculate the delta between where we were compiled to run +/* Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ - leal (BP_scratch+4)(%esi), %esp - call 1f -1: popl %ebp - subl $1b, %ebp + leal (0x1e4+4)(%esi), %esp + call 1f +1: popl %ebp + subl $1b, %ebp -/* - * %ebp contains the address we are loaded at by the boot loader and %ebx +/* %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. 
*/ #ifdef CONFIG_RELOCATABLE - movl %ebp, %ebx - movl BP_kernel_alignment(%esi), %eax - decl %eax - addl %eax, %ebx - notl %eax - andl %eax, %ebx + movl %ebp, %ebx + addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx + andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $LOAD_PHYSICAL_ADDR, %ebx #endif - /* Target address to relocate to for decompression */ - addl $z_extract_offset, %ebx - - /* Set up the stack */ - leal boot_stack_end(%ebx), %esp - - /* Zero EFLAGS */ - pushl $0 - popfl - -/* - * Copy the compressed kernel to the end of our buffer + /* Replace the compressed data size with the uncompressed size */ + subl input_len(%ebp), %ebx + movl output_len(%ebp), %eax + addl %eax, %ebx + /* Add 8 bytes for every 32K input block */ + shrl $12, %eax + addl %eax, %ebx + /* Add 32K + 18 bytes of extra slack */ + addl $(32768 + 18), %ebx + /* Align on a 4K boundary */ + addl $4095, %ebx + andl $~4095, %ebx + +/* Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ - pushl %esi - leal (_bss-4)(%ebp), %esi - leal (_bss-4)(%ebx), %edi - movl $(_bss - startup_32), %ecx - shrl $2, %ecx + pushl %esi + leal _end(%ebp), %esi + leal _end(%ebx), %edi + movl $(_end - startup_32), %ecx std - rep movsl + rep + movsb cld - popl %esi + popl %esi + +/* Compute the kernel start address. + */ +#ifdef CONFIG_RELOCATABLE + addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp + andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp +#else + movl $LOAD_PHYSICAL_ADDR, %ebp +#endif /* * Jump to the relocated address. */ - leal relocated(%ebx), %eax - jmp *%eax + leal relocated(%ebx), %eax + jmp *%eax ENDPROC(startup_32) - .text +.section ".text" relocated: /* - * Clear BSS (stack is currently empty) + * Clear BSS + */ + xorl %eax,%eax + leal _edata(%ebx),%edi + leal _end(%ebx), %ecx + subl %edi,%ecx + cld + rep + stosb + +/* + * Setup the stack for the decompressor */ - xorl %eax, %eax - leal _bss(%ebx), %edi - leal _ebss(%ebx), %ecx - subl %edi, %ecx - shrl $2, %ecx - rep stosl + leal boot_stack_end(%ebx), %esp /* * Do the decompression, and jump to the new kernel.. */ - leal z_extract_offset_negative(%ebx), %ebp - /* push arguments for decompress_kernel: */ - pushl %ebp /* output address */ - pushl $z_input_len /* input_len */ - leal input_data(%ebx), %eax - pushl %eax /* input_data */ - leal boot_heap(%ebx), %eax - pushl %eax /* heap area */ - pushl %esi /* real mode pointer */ - call decompress_kernel - addl $20, %esp + movl output_len(%ebx), %eax + pushl %eax + # push arguments for decompress_kernel: + pushl %ebp # output address + movl input_len(%ebx), %eax + pushl %eax # input_len + leal input_data(%ebx), %eax + pushl %eax # input_data + leal boot_heap(%ebx), %eax + pushl %eax # heap area + pushl %esi # real mode pointer + call decompress_kernel + addl $20, %esp + popl %ecx #if CONFIG_RELOCATABLE -/* - * Find the address of the relocations. +/* Find the address of the relocations. */ - leal z_output_len(%ebp), %edi + movl %ebp, %edi + addl %ecx, %edi -/* - * Calculate the delta between where vmlinux was compiled to run +/* Calculate the delta between where vmlinux was compiled to run * and where it was actually loaded. */ - movl %ebp, %ebx - subl $LOAD_PHYSICAL_ADDR, %ebx - jz 2f /* Nothing to be done if loaded at compiled addr. */ + movl %ebp, %ebx + subl $LOAD_PHYSICAL_ADDR, %ebx + jz 2f /* Nothing to be done if loaded at compiled addr. */ /* * Process relocations. 
*/ -1: subl $4, %edi - movl (%edi), %ecx - testl %ecx, %ecx - jz 2f - addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) - jmp 1b +1: subl $4, %edi + movl 0(%edi), %ecx + testl %ecx, %ecx + jz 2f + addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) + jmp 1b 2: #endif /* * Jump to the decompressed kernel. */ - xorl %ebx, %ebx - jmp *%ebp + xorl %ebx,%ebx + jmp *%ebp -/* - * Stack and heap for uncompression - */ - .bss - .balign 4 +.bss +/* Stack and heap for uncompression */ +.balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: diff --git a/trunk/arch/x86/boot/compressed/head_64.S b/trunk/arch/x86/boot/compressed/head_64.S index f62c284db9eb..ed4a82948002 100644 --- a/trunk/arch/x86/boot/compressed/head_64.S +++ b/trunk/arch/x86/boot/compressed/head_64.S @@ -21,8 +21,8 @@ /* * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ - .code32 - .text +.code32 +.text #include #include @@ -33,14 +33,12 @@ #include #include - .section ".text.head" +.section ".text.head" .code32 ENTRY(startup_32) cld - /* - * Test KEEP_SEGMENTS flag to see if the bootloader is asking - * us to not reload segments - */ + /* test KEEP_SEGMENTS flag to see if the bootloader is asking + * us to not reload segments */ testb $(1<<6), BP_loadflags(%esi) jnz 1f @@ -51,15 +49,14 @@ ENTRY(startup_32) movl %eax, %ss 1: -/* - * Calculate the delta between where we were compiled to run +/* Calculate the delta between where we were compiled to run * at and where we were actually loaded at. This can only be done * with a short local call on x86. Nothing else will tell us what * address we are running at. The reserved chunk of the real-mode * data at 0x1e4 (defined as a scratch field) are used as the stack * for this calculation. Only 4 bytes are needed. */ - leal (BP_scratch+4)(%esi), %esp + leal (0x1e4+4)(%esi), %esp call 1f 1: popl %ebp subl $1b, %ebp @@ -73,28 +70,32 @@ ENTRY(startup_32) testl %eax, %eax jnz no_longmode -/* - * Compute the delta between where we were compiled to run at +/* Compute the delta between where we were compiled to run at * and where the code will actually run at. - * - * %ebp contains the address we are loaded at by the boot loader and %ebx + */ +/* %ebp contains the address we are loaded at by the boot loader and %ebx * contains the address where we should move the kernel image temporarily * for safe in-place decompression. 
*/ #ifdef CONFIG_RELOCATABLE movl %ebp, %ebx - movl BP_kernel_alignment(%esi), %eax - decl %eax - addl %eax, %ebx - notl %eax - andl %eax, %ebx + addl $(PMD_PAGE_SIZE -1), %ebx + andl $PMD_PAGE_MASK, %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $CONFIG_PHYSICAL_START, %ebx #endif - /* Target address to relocate to for decompression */ - addl $z_extract_offset, %ebx + /* Replace the compressed data size with the uncompressed size */ + subl input_len(%ebp), %ebx + movl output_len(%ebp), %eax + addl %eax, %ebx + /* Add 8 bytes for every 32K input block */ + shrl $12, %eax + addl %eax, %ebx + /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */ + addl $(32768 + 18 + 4095), %ebx + andl $~4095, %ebx /* * Prepare for entering 64 bit mode @@ -113,7 +114,7 @@ ENTRY(startup_32) /* * Build early 4G boot pagetable */ - /* Initialize Page tables to 0 */ + /* Initialize Page tables to 0*/ leal pgtable(%ebx), %edi xorl %eax, %eax movl $((4096*6)/4), %ecx @@ -154,8 +155,7 @@ ENTRY(startup_32) btsl $_EFER_LME, %eax wrmsr - /* - * Setup for the jump to 64bit mode + /* Setup for the jump to 64bit mode * * When the jump is performend we will be in long mode but * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1 @@ -184,8 +184,7 @@ no_longmode: #include "../../kernel/verify_cpu_64.S" - /* - * Be careful here startup_64 needs to be at a predictable + /* Be careful here startup_64 needs to be at a predictable * address so I can export it in an ELF header. Bootloaders * should look at the ELF header to find this address, as * it may change in the future. @@ -193,8 +192,7 @@ no_longmode: .code64 .org 0x200 ENTRY(startup_64) - /* - * We come here either from startup_32 or directly from a + /* We come here either from startup_32 or directly from a * 64bit bootloader. If we come here from a bootloader we depend on * an identity mapped page table being provied that maps our * entire text+data+bss and hopefully all of memory. @@ -211,54 +209,50 @@ ENTRY(startup_64) movl $0x20, %eax ltr %ax - /* - * Compute the decompressed kernel start address. It is where + /* Compute the decompressed kernel start address. It is where * we were loaded at aligned to a 2M boundary. %rbp contains the * decompressed kernel start address. * * If it is a relocatable kernel then decompress and run the kernel * from load address aligned to 2MB addr, otherwise decompress and - * run the kernel from LOAD_PHYSICAL_ADDR - * - * We cannot rely on the calculation done in 32-bit mode, since we - * may have been invoked via the 64-bit entry point. + * run the kernel from CONFIG_PHYSICAL_START */ /* Start with the delta to where the kernel will run at. 
*/ #ifdef CONFIG_RELOCATABLE leaq startup_32(%rip) /* - $startup_32 */, %rbp - movl BP_kernel_alignment(%rsi), %eax - decl %eax - addq %rax, %rbp - notq %rax - andq %rax, %rbp + addq $(PMD_PAGE_SIZE - 1), %rbp + andq $PMD_PAGE_MASK, %rbp + movq %rbp, %rbx #else - movq $LOAD_PHYSICAL_ADDR, %rbp + movq $CONFIG_PHYSICAL_START, %rbp + movq %rbp, %rbx #endif - /* Target address to relocate to for decompression */ - leaq z_extract_offset(%rbp), %rbx - - /* Set up the stack */ - leaq boot_stack_end(%rbx), %rsp - - /* Zero EFLAGS */ - pushq $0 - popfq - -/* - * Copy the compressed kernel to the end of our buffer + /* Replace the compressed data size with the uncompressed size */ + movl input_len(%rip), %eax + subq %rax, %rbx + movl output_len(%rip), %eax + addq %rax, %rbx + /* Add 8 bytes for every 32K input block */ + shrq $12, %rax + addq %rax, %rbx + /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */ + addq $(32768 + 18 + 4095), %rbx + andq $~4095, %rbx + +/* Copy the compressed kernel to the end of our buffer * where decompression in place becomes safe. */ - pushq %rsi - leaq (_bss-8)(%rip), %rsi - leaq (_bss-8)(%rbx), %rdi - movq $_bss /* - $startup_32 */, %rcx - shrq $3, %rcx - std - rep movsq - cld - popq %rsi + leaq _end_before_pgt(%rip), %r8 + leaq _end_before_pgt(%rbx), %r9 + movq $_end_before_pgt /* - $startup_32 */, %rcx +1: subq $8, %r8 + subq $8, %r9 + movq 0(%r8), %rax + movq %rax, 0(%r9) + subq $8, %rcx + jnz 1b /* * Jump to the relocated address. @@ -266,28 +260,37 @@ ENTRY(startup_64) leaq relocated(%rbx), %rax jmp *%rax - .text +.section ".text" relocated: /* - * Clear BSS (stack is currently empty) + * Clear BSS */ - xorl %eax, %eax - leaq _bss(%rip), %rdi - leaq _ebss(%rip), %rcx + xorq %rax, %rax + leaq _edata(%rbx), %rdi + leaq _end_before_pgt(%rbx), %rcx subq %rdi, %rcx - shrq $3, %rcx - rep stosq + cld + rep + stosb + + /* Setup the stack */ + leaq boot_stack_end(%rip), %rsp + + /* zero EFLAGS after setting rsp */ + pushq $0 + popfq /* * Do the decompression, and jump to the new kernel.. 
*/ - pushq %rsi /* Save the real mode argument */ - movq %rsi, %rdi /* real mode address */ - leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ - leaq input_data(%rip), %rdx /* input_data */ - movl $z_input_len, %ecx /* input_len */ - movq %rbp, %r8 /* output target address */ + pushq %rsi # Save the real mode argument + movq %rsi, %rdi # real mode address + leaq boot_heap(%rip), %rsi # malloc area for uncompression + leaq input_data(%rip), %rdx # input_data + movl input_len(%rip), %eax + movq %rax, %rcx # input_len + movq %rbp, %r8 # output call decompress_kernel popq %rsi @@ -308,21 +311,11 @@ gdt: .quad 0x0000000000000000 /* TS continued */ gdt_end: -/* - * Stack and heap for uncompression - */ - .bss - .balign 4 +.bss +/* Stack and heap for uncompression */ +.balign 4 boot_heap: .fill BOOT_HEAP_SIZE, 1, 0 boot_stack: .fill BOOT_STACK_SIZE, 1, 0 boot_stack_end: - -/* - * Space for page tables (not in .bss so not zeroed) - */ - .section ".pgtable","a",@nobits - .balign 4096 -pgtable: - .fill 6*4096, 1, 0 diff --git a/trunk/arch/x86/boot/compressed/misc.c b/trunk/arch/x86/boot/compressed/misc.c index 842b2a36174a..e45be73684ff 100644 --- a/trunk/arch/x86/boot/compressed/misc.c +++ b/trunk/arch/x86/boot/compressed/misc.c @@ -325,18 +325,20 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; - if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) - error("Destination address inappropriately aligned"); #ifdef CONFIG_X86_64 - if (heap > 0x3fffffffffffUL) + if ((unsigned long)output & (__KERNEL_ALIGN - 1)) + error("Destination address not 2M aligned"); + if ((unsigned long)output >= 0xffffffffffUL) error("Destination address too large"); #else + if ((u32)output & (CONFIG_PHYSICAL_ALIGN - 1)) + error("Destination address not CONFIG_PHYSICAL_ALIGN aligned"); if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) error("Destination address too large"); -#endif #ifndef CONFIG_RELOCATABLE - if ((unsigned long)output != LOAD_PHYSICAL_ADDR) + if ((u32)output != LOAD_PHYSICAL_ADDR) error("Wrong destination address"); +#endif #endif if (!quiet) diff --git a/trunk/arch/x86/boot/compressed/mkpiggy.c b/trunk/arch/x86/boot/compressed/mkpiggy.c deleted file mode 100644 index bcbd36c41432..000000000000 --- a/trunk/arch/x86/boot/compressed/mkpiggy.c +++ /dev/null @@ -1,97 +0,0 @@ -/* ----------------------------------------------------------------------- * - * - * Copyright (C) 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * H. Peter Anvin - * - * ----------------------------------------------------------------------- */ - -/* - * Compute the desired load offset from a compressed program; outputs - * a small assembly wrapper with the appropriate symbols defined. 
- */ - -#include -#include -#include -#include - -static uint32_t getle32(const void *p) -{ - const uint8_t *cp = p; - - return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) + - ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24); -} - -int main(int argc, char *argv[]) -{ - uint32_t olen; - long ilen; - unsigned long offs; - FILE *f; - - if (argc < 2) { - fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); - return 1; - } - - /* Get the information for the compressed kernel image first */ - - f = fopen(argv[1], "r"); - if (!f) { - perror(argv[1]); - return 1; - } - - - if (fseek(f, -4L, SEEK_END)) { - perror(argv[1]); - } - fread(&olen, sizeof olen, 1, f); - ilen = ftell(f); - olen = getle32(&olen); - fclose(f); - - /* - * Now we have the input (compressed) and output (uncompressed) - * sizes, compute the necessary decompression offset... - */ - - offs = (olen > ilen) ? olen - ilen : 0; - offs += olen >> 12; /* Add 8 bytes for each 32K block */ - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ - offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ - - printf(".section \".rodata.compressed\",\"a\",@progbits\n"); - printf(".globl z_input_len\n"); - printf("z_input_len = %lu\n", ilen); - printf(".globl z_output_len\n"); - printf("z_output_len = %lu\n", (unsigned long)olen); - printf(".globl z_extract_offset\n"); - printf("z_extract_offset = 0x%lx\n", offs); - /* z_extract_offset_negative allows simplification of head_32.S */ - printf(".globl z_extract_offset_negative\n"); - printf("z_extract_offset_negative = -0x%lx\n", offs); - - printf(".globl input_data, input_data_end\n"); - printf("input_data:\n"); - printf(".incbin \"%s\"\n", argv[1]); - printf("input_data_end:\n"); - - return 0; -} diff --git a/trunk/arch/x86/boot/compressed/vmlinux.scr b/trunk/arch/x86/boot/compressed/vmlinux.scr new file mode 100644 index 000000000000..f02382ae5c48 --- /dev/null +++ b/trunk/arch/x86/boot/compressed/vmlinux.scr @@ -0,0 +1,10 @@ +SECTIONS +{ + .rodata.compressed : { + input_len = .; + LONG(input_data_end - input_data) input_data = .; + *(.data) + output_len = . - 4; + input_data_end = .; + } +} diff --git a/trunk/arch/x86/boot/compressed/vmlinux_32.lds b/trunk/arch/x86/boot/compressed/vmlinux_32.lds new file mode 100644 index 000000000000..bb3c48379c40 --- /dev/null +++ b/trunk/arch/x86/boot/compressed/vmlinux_32.lds @@ -0,0 +1,43 @@ +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_ARCH(i386) +ENTRY(startup_32) +SECTIONS +{ + /* Be careful parts of head_32.S assume startup_32 is at + * address 0. + */ + . = 0; + .text.head : { + _head = . ; + *(.text.head) + _ehead = . ; + } + .rodata.compressed : { + *(.rodata.compressed) + } + .text : { + _text = .; /* Text */ + *(.text) + *(.text.*) + _etext = . ; + } + .rodata : { + _rodata = . ; + *(.rodata) /* read-only data */ + *(.rodata.*) + _erodata = . ; + } + .data : { + _data = . ; + *(.data) + *(.data.*) + _edata = . ; + } + .bss : { + _bss = . ; + *(.bss) + *(.bss.*) + *(COMMON) + _end = . 
; + } +} diff --git a/trunk/arch/x86/boot/compressed/vmlinux.lds.S b/trunk/arch/x86/boot/compressed/vmlinux_64.lds similarity index 57% rename from trunk/arch/x86/boot/compressed/vmlinux.lds.S rename to trunk/arch/x86/boot/compressed/vmlinux_64.lds index cc353e1b3ffd..bef1ac891bce 100644 --- a/trunk/arch/x86/boot/compressed/vmlinux.lds.S +++ b/trunk/arch/x86/boot/compressed/vmlinux_64.lds @@ -1,17 +1,6 @@ -OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) - -#undef i386 - -#include - -#ifdef CONFIG_X86_64 +OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") OUTPUT_ARCH(i386:x86-64) ENTRY(startup_64) -#else -OUTPUT_ARCH(i386) -ENTRY(startup_32) -#endif - SECTIONS { /* Be careful parts of head_64.S assume startup_32 is at @@ -44,22 +33,16 @@ SECTIONS *(.data.*) _edata = . ; } - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); .bss : { _bss = . ; *(.bss) *(.bss.*) *(COMMON) - . = ALIGN(8); /* For convenience during zeroing */ + . = ALIGN(8); + _end_before_pgt = . ; + . = ALIGN(4096); + pgtable = . ; + . = . + 4096 * 6; _ebss = .; } -#ifdef CONFIG_X86_64 - . = ALIGN(PAGE_SIZE); - .pgtable : { - _pgtable = . ; - *(.pgtable) - _epgtable = . ; - } -#endif - _end = .; } diff --git a/trunk/arch/x86/boot/edd.c b/trunk/arch/x86/boot/edd.c index c501a5b466f8..1aae8f3e5ca1 100644 --- a/trunk/arch/x86/boot/edd.c +++ b/trunk/arch/x86/boot/edd.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -23,17 +22,17 @@ */ static int read_mbr(u8 devno, void *buf) { - struct biosregs ireg, oreg; + u16 ax, bx, cx, dx; - initregs(&ireg); - ireg.ax = 0x0201; /* Legacy Read, one sector */ - ireg.cx = 0x0001; /* Sector 0-0-1 */ - ireg.dl = devno; - ireg.bx = (size_t)buf; + ax = 0x0201; /* Legacy Read, one sector */ + cx = 0x0001; /* Sector 0-0-1 */ + dx = devno; + bx = (size_t)buf; + asm volatile("pushfl; stc; int $0x13; setc %%al; popfl" + : "+a" (ax), "+c" (cx), "+d" (dx), "+b" (bx) + : : "esi", "edi", "memory"); - intcall(0x13, &ireg, &oreg); - - return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ + return -(u8)ax; /* 0 or -1 */ } static u32 read_mbr_sig(u8 devno, struct edd_info *ei, u32 *mbrsig) @@ -73,46 +72,56 @@ static u32 read_mbr_sig(u8 devno, struct edd_info *ei, u32 *mbrsig) static int get_edd_info(u8 devno, struct edd_info *ei) { - struct biosregs ireg, oreg; + u16 ax, bx, cx, dx, di; memset(ei, 0, sizeof *ei); /* Check Extensions Present */ - initregs(&ireg); - ireg.ah = 0x41; - ireg.bx = EDDMAGIC1; - ireg.dl = devno; - intcall(0x13, &ireg, &oreg); + ax = 0x4100; + bx = EDDMAGIC1; + dx = devno; + asm("pushfl; stc; int $0x13; setc %%al; popfl" + : "+a" (ax), "+b" (bx), "=c" (cx), "+d" (dx) + : : "esi", "edi"); - if (oreg.eflags & X86_EFLAGS_CF) + if ((u8)ax) return -1; /* No extended information */ - if (oreg.bx != EDDMAGIC2) + if (bx != EDDMAGIC2) return -1; ei->device = devno; - ei->version = oreg.ah; /* EDD version number */ - ei->interface_support = oreg.cx; /* EDD functionality subsets */ + ei->version = ax >> 8; /* EDD version number */ + ei->interface_support = cx; /* EDD functionality subsets */ /* Extended Get Device Parameters */ ei->params.length = sizeof(ei->params); - ireg.ah = 0x48; - ireg.si = (size_t)&ei->params; - intcall(0x13, &ireg, &oreg); + ax = 0x4800; + dx = devno; + asm("pushfl; int $0x13; popfl" + : "+a" 
(ax), "+d" (dx), "=m" (ei->params) + : "S" (&ei->params) + : "ebx", "ecx", "edi"); /* Get legacy CHS parameters */ /* Ralf Brown recommends setting ES:DI to 0:0 */ - ireg.ah = 0x08; - ireg.es = 0; - intcall(0x13, &ireg, &oreg); - - if (!(oreg.eflags & X86_EFLAGS_CF)) { - ei->legacy_max_cylinder = oreg.ch + ((oreg.cl & 0xc0) << 2); - ei->legacy_max_head = oreg.dh; - ei->legacy_sectors_per_track = oreg.cl & 0x3f; + ax = 0x0800; + dx = devno; + di = 0; + asm("pushw %%es; " + "movw %%di,%%es; " + "pushfl; stc; int $0x13; setc %%al; popfl; " + "popw %%es" + : "+a" (ax), "=b" (bx), "=c" (cx), "+d" (dx), "+D" (di) + : : "esi"); + + if ((u8)ax == 0) { + ei->legacy_max_cylinder = (cx >> 8) + ((cx & 0xc0) << 2); + ei->legacy_max_head = dx >> 8; + ei->legacy_sectors_per_track = cx & 0x3f; } return 0; diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S index b31cc54b4641..5d84d1c74e4c 100644 --- a/trunk/arch/x86/boot/header.S +++ b/trunk/arch/x86/boot/header.S @@ -22,8 +22,7 @@ #include #include #include "boot.h" -#include "voffset.h" -#include "zoffset.h" +#include "offsets.h" BOOTSEG = 0x07C0 /* original address of boot-sector */ SYSSEG = 0x1000 /* historical load address >> 4 */ @@ -116,7 +115,7 @@ _start: # Part 2 of the header, from the old setup.S .ascii "HdrS" # header signature - .word 0x020a # header version number (>= 0x0105) + .word 0x0209 # header version number (>= 0x0105) # or else old loadlin-1.5 will fail) .globl realmode_swtch realmode_swtch: .word 0, 0 # default_switch, SETUPSEG @@ -169,11 +168,7 @@ heap_end_ptr: .word _end+STACK_SIZE-512 # end of setup code can be used by setup # for local heap purposes. -ext_loader_ver: - .byte 0 # Extended boot loader version -ext_loader_type: - .byte 0 # Extended boot loader type - +pad1: .word 0 cmd_line_ptr: .long 0 # (Header version 0x0202 or later) # If nonzero, a 32-bit pointer # to the kernel command line. @@ -205,7 +200,7 @@ relocatable_kernel: .byte 1 #else relocatable_kernel: .byte 0 #endif -min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment +pad2: .byte 0 pad3: .word 0 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, @@ -217,27 +212,16 @@ hardware_subarch: .long 0 # subarchitecture, added with 2.07 hardware_subarch_data: .quad 0 -payload_offset: .long ZO_input_data -payload_length: .long ZO_z_input_len +payload_offset: .long input_data +payload_length: .long input_data_end-input_data setup_data: .quad 0 # 64-bit physical pointer to # single linked list of # struct setup_data -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr - -#define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) -#define VO_INIT_SIZE (VO__end - VO__text) -#if ZO_INIT_SIZE > VO_INIT_SIZE -#define INIT_SIZE ZO_INIT_SIZE -#else -#define INIT_SIZE VO_INIT_SIZE -#endif -init_size: .long INIT_SIZE # kernel initialization size - # End of setup header ##################################################### - .section ".entrytext", "ax" + .section ".inittext", "ax" start_of_setup: #ifdef SAFE_RESET_DISK_CONTROLLER # Reset the disk controller. diff --git a/trunk/arch/x86/boot/main.c b/trunk/arch/x86/boot/main.c index 140172b895bd..58f0415d3ae0 100644 --- a/trunk/arch/x86/boot/main.c +++ b/trunk/arch/x86/boot/main.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. 
Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -62,10 +61,11 @@ static void copy_boot_params(void) */ static void keyboard_set_repeat(void) { - struct biosregs ireg; - initregs(&ireg); - ireg.ax = 0x0305; - intcall(0x16, &ireg, NULL); + u16 ax = 0x0305; + u16 bx = 0; + asm volatile("int $0x16" + : "+a" (ax), "+b" (bx) + : : "ecx", "edx", "esi", "edi"); } /* @@ -73,22 +73,18 @@ static void keyboard_set_repeat(void) */ static void query_ist(void) { - struct biosregs ireg, oreg; - /* Some older BIOSes apparently crash on this call, so filter it from machines too old to have SpeedStep at all. */ if (cpu.level < 6) return; - initregs(&ireg); - ireg.ax = 0xe980; /* IST Support */ - ireg.edx = 0x47534943; /* Request value */ - intcall(0x15, &ireg, &oreg); - - boot_params.ist_info.signature = oreg.eax; - boot_params.ist_info.command = oreg.ebx; - boot_params.ist_info.event = oreg.ecx; - boot_params.ist_info.perf_level = oreg.edx; + asm("int $0x15" + : "=a" (boot_params.ist_info.signature), + "=b" (boot_params.ist_info.command), + "=c" (boot_params.ist_info.event), + "=d" (boot_params.ist_info.perf_level) + : "a" (0x0000e980), /* IST Support */ + "d" (0x47534943)); /* Request value */ } /* @@ -97,12 +93,13 @@ static void query_ist(void) static void set_bios_mode(void) { #ifdef CONFIG_X86_64 - struct biosregs ireg; + u32 eax, ebx; - initregs(&ireg); - ireg.ax = 0xec00; - ireg.bx = 2; - intcall(0x15, &ireg, NULL); + eax = 0xec00; + ebx = 2; + asm volatile("int $0x15" + : "+a" (eax), "+b" (ebx) + : : "ecx", "edx", "esi", "edi"); #endif } diff --git a/trunk/arch/x86/boot/mca.c b/trunk/arch/x86/boot/mca.c index a95a531148ef..911eaae5d696 100644 --- a/trunk/arch/x86/boot/mca.c +++ b/trunk/arch/x86/boot/mca.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
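The BIOS-call conversions in these hunks all follow one pattern: the register arguments become local variables, the interrupt is issued from an asm statement whose outputs cover every register the BIOS may hand back, the carry flag is captured with setc, and anything else the BIOS might touch goes in the clobber list. Below is a minimal sketch of that pattern using a hypothetical APM installation check (int 0x15, AX=0x5300) that is not part of this patch; the helper name is invented and u8/u16 are the boot code's fixed-width types:

        static int query_apm_bios_present(void)
        {
                u16 ax = 0x5300;        /* APM installation check */
                u16 bx = 0x0000;        /* device id: the APM BIOS itself */
                u8 err;

                asm("int $0x15 ; setc %0"
                    : "=q" (err), "+a" (ax), "+b" (bx)
                    : : "ecx", "edx", "esi", "edi");

                return !err;            /* CF set means no APM BIOS */
        }

The e820 hunk further down states the rule the clobber lists enforce: a register some BIOS is known to modify must either be used as an output or be named explicitly as clobbered.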
@@ -17,22 +16,26 @@ int query_mca(void) { - struct biosregs ireg, oreg; - u16 len; - - initregs(&ireg); - ireg.ah = 0xc0; - intcall(0x15, &ireg, &oreg); - - if (oreg.eflags & X86_EFLAGS_CF) + u8 err; + u16 es, bx, len; + + asm("pushw %%es ; " + "int $0x15 ; " + "setc %0 ; " + "movw %%es, %1 ; " + "popw %%es" + : "=acd" (err), "=acdSD" (es), "=b" (bx) + : "a" (0xc000)); + + if (err) return -1; /* No MCA present */ - set_fs(oreg.es); - len = rdfs16(oreg.bx); + set_fs(es); + len = rdfs16(bx); if (len > sizeof(boot_params.sys_desc_table)) len = sizeof(boot_params.sys_desc_table); - copy_from_fs(&boot_params.sys_desc_table, oreg.bx, len); + copy_from_fs(&boot_params.sys_desc_table, bx, len); return 0; } diff --git a/trunk/arch/x86/boot/memory.c b/trunk/arch/x86/boot/memory.c index cae3feb1035e..74b3d2ba84e9 100644 --- a/trunk/arch/x86/boot/memory.c +++ b/trunk/arch/x86/boot/memory.c @@ -20,16 +20,12 @@ static int detect_memory_e820(void) { int count = 0; - struct biosregs ireg, oreg; + u32 next = 0; + u32 size, id, edi; + u8 err; struct e820entry *desc = boot_params.e820_map; static struct e820entry buf; /* static so it is zeroed */ - initregs(&ireg); - ireg.ax = 0xe820; - ireg.cx = sizeof buf; - ireg.edx = SMAP; - ireg.di = (size_t)&buf; - /* * Note: at least one BIOS is known which assumes that the * buffer pointed to by one e820 call is the same one as @@ -45,13 +41,22 @@ static int detect_memory_e820(void) */ do { - intcall(0x15, &ireg, &oreg); - ireg.ebx = oreg.ebx; /* for next iteration... */ + size = sizeof buf; + + /* Important: %edx and %esi are clobbered by some BIOSes, + so they must be either used for the error output + or explicitly marked clobbered. Given that, assume there + is something out there clobbering %ebp and %edi, too. */ + asm("pushl %%ebp; int $0x15; popl %%ebp; setc %0" + : "=d" (err), "+b" (next), "=a" (id), "+c" (size), + "=D" (edi), "+m" (buf) + : "D" (&buf), "d" (SMAP), "a" (0xe820) + : "esi"); /* BIOSes which terminate the chain with CF = 1 as opposed to %ebx = 0 don't always report the SMAP signature on the final, failing, probe. */ - if (oreg.eflags & X86_EFLAGS_CF) + if (err) break; /* Some BIOSes stop returning SMAP in the middle of @@ -59,64 +64,60 @@ static int detect_memory_e820(void) screwed up the map at that point, we might have a partial map, the full map, or complete garbage, so just return failure. */ - if (oreg.eax != SMAP) { + if (id != SMAP) { count = 0; break; } *desc++ = buf; count++; - } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map)); + } while (next && count < ARRAY_SIZE(boot_params.e820_map)); return boot_params.e820_entries = count; } static int detect_memory_e801(void) { - struct biosregs ireg, oreg; + u16 ax, bx, cx, dx; + u8 err; - initregs(&ireg); - ireg.ax = 0xe801; - intcall(0x15, &ireg, &oreg); + bx = cx = dx = 0; + ax = 0xe801; + asm("stc; int $0x15; setc %0" + : "=m" (err), "+a" (ax), "+b" (bx), "+c" (cx), "+d" (dx)); - if (oreg.eflags & X86_EFLAGS_CF) + if (err) return -1; /* Do we really need to do this? */ - if (oreg.cx || oreg.dx) { - oreg.ax = oreg.cx; - oreg.bx = oreg.dx; + if (cx || dx) { + ax = cx; + bx = dx; } - if (oreg.ax > 15*1024) { + if (ax > 15*1024) return -1; /* Bogus! */ - } else if (oreg.ax == 15*1024) { - boot_params.alt_mem_k = (oreg.dx << 6) + oreg.ax; - } else { - /* - * This ignores memory above 16MB if we have a memory - * hole there. If someone actually finds a machine - * with a memory hole at 16MB and no support for - * 0E820h they should probably generate a fake e820 - * map. 
- */ - boot_params.alt_mem_k = oreg.ax; - } + + /* This ignores memory above 16MB if we have a memory hole + there. If someone actually finds a machine with a memory + hole at 16MB and no support for 0E820h they should probably + generate a fake e820 map. */ + boot_params.alt_mem_k = (ax == 15*1024) ? (dx << 6)+ax : ax; return 0; } static int detect_memory_88(void) { - struct biosregs ireg, oreg; + u16 ax; + u8 err; - initregs(&ireg); - ireg.ah = 0x88; - intcall(0x15, &ireg, &oreg); + ax = 0x8800; + asm("stc; int $0x15; setc %0" : "=bcdm" (err), "+a" (ax)); - boot_params.screen_info.ext_mem_k = oreg.ax; + boot_params.screen_info.ext_mem_k = ax; - return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ + return -err; } int detect_memory(void) diff --git a/trunk/arch/x86/boot/regs.c b/trunk/arch/x86/boot/regs.c deleted file mode 100644 index 958019b1cfa5..000000000000 --- a/trunk/arch/x86/boot/regs.c +++ /dev/null @@ -1,29 +0,0 @@ -/* ----------------------------------------------------------------------- - * - * Copyright 2009 Intel Corporation; author H. Peter Anvin - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2 or (at your - * option) any later version; incorporated herein by reference. - * - * ----------------------------------------------------------------------- */ - -/* - * Simple helper function for initializing a register set. - * - * Note that this sets EFLAGS_CF in the input register set; this - * makes it easier to catch functions which do nothing but don't - * explicitly set CF. - */ - -#include "boot.h" - -void initregs(struct biosregs *reg) -{ - memset(reg, 0, sizeof *reg); - reg->eflags |= X86_EFLAGS_CF; - reg->ds = ds(); - reg->es = ds(); - reg->fs = fs(); - reg->gs = gs(); -} diff --git a/trunk/arch/x86/boot/setup.ld b/trunk/arch/x86/boot/setup.ld index 0f6ec455a2b1..bb8dc2de7969 100644 --- a/trunk/arch/x86/boot/setup.ld +++ b/trunk/arch/x86/boot/setup.ld @@ -15,11 +15,8 @@ SECTIONS . = 497; .header : { *(.header) } - .entrytext : { *(.entrytext) } .inittext : { *(.inittext) } .initdata : { *(.initdata) } - __end_init = .; - .text : { *(.text) } .text32 : { *(.text32) } @@ -55,7 +52,4 @@ SECTIONS . = ASSERT(_end <= 0x8000, "Setup too big!"); . = ASSERT(hdr == 0x1f1, "The setup header has the wrong offset!"); - /* Necessary for the very-old-loader check to work... */ - . = ASSERT(__end_init <= 5*512, "init sections too big!"); - } diff --git a/trunk/arch/x86/boot/tty.c b/trunk/arch/x86/boot/tty.c index 01ec69c901c7..7e8e8b25f5f6 100644 --- a/trunk/arch/x86/boot/tty.c +++ b/trunk/arch/x86/boot/tty.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -23,23 +22,24 @@ void __attribute__((section(".inittext"))) putchar(int ch) { - struct biosregs ireg; + unsigned char c = ch; - if (ch == '\n') + if (c == '\n') putchar('\r'); /* \n -> \r\n */ - initregs(&ireg); - ireg.bx = 0x0007; - ireg.cx = 0x0001; - ireg.ah = 0x0e; - ireg.al = ch; - intcall(0x10, &ireg, NULL); + /* int $0x10 is known to have bugs involving touching registers + it shouldn't. Be extra conservative... 
*/ + asm volatile("pushal; pushw %%ds; int $0x10; popw %%ds; popal" + : : "b" (0x0007), "c" (0x0001), "a" (0x0e00|ch)); } void __attribute__((section(".inittext"))) puts(const char *str) { - while (*str) + int n = 0; + while (*str) { putchar(*str++); + n++; + } } /* @@ -49,13 +49,14 @@ void __attribute__((section(".inittext"))) puts(const char *str) static u8 gettime(void) { - struct biosregs ireg, oreg; + u16 ax = 0x0200; + u16 cx, dx; - initregs(&ireg); - ireg.ah = 0x02; - intcall(0x1a, &ireg, &oreg); + asm volatile("int $0x1a" + : "+a" (ax), "=c" (cx), "=d" (dx) + : : "ebx", "esi", "edi"); - return oreg.dh; + return dx >> 8; } /* @@ -63,24 +64,19 @@ static u8 gettime(void) */ int getchar(void) { - struct biosregs ireg, oreg; - - initregs(&ireg); - /* ireg.ah = 0x00; */ - intcall(0x16, &ireg, &oreg); + u16 ax = 0; + asm volatile("int $0x16" : "+a" (ax)); - return oreg.al; + return ax & 0xff; } static int kbd_pending(void) { - struct biosregs ireg, oreg; - - initregs(&ireg); - ireg.ah = 0x01; - intcall(0x16, &ireg, &oreg); - - return !(oreg.eflags & X86_EFLAGS_ZF); + u8 pending; + asm volatile("int $0x16; setnz %0" + : "=qm" (pending) + : "a" (0x0100)); + return pending; } void kbd_flush(void) diff --git a/trunk/arch/x86/boot/video-bios.c b/trunk/arch/x86/boot/video-bios.c index d660be492363..3fa979c9c363 100644 --- a/trunk/arch/x86/boot/video-bios.c +++ b/trunk/arch/x86/boot/video-bios.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -30,21 +29,21 @@ static int bios_set_mode(struct mode_info *mi) static int set_bios_mode(u8 mode) { - struct biosregs ireg, oreg; + u16 ax; u8 new_mode; - initregs(&ireg); - ireg.al = mode; /* AH=0x00 Set Video Mode */ - intcall(0x10, &ireg, NULL); + ax = mode; /* AH=0x00 Set Video Mode */ + asm volatile(INT10 + : "+a" (ax) + : : "ebx", "ecx", "edx", "esi", "edi"); - - ireg.ah = 0x0f; /* Get Current Video Mode */ - intcall(0x10, &ireg, &oreg); + ax = 0x0f00; /* Get Current Video Mode */ + asm volatile(INT10 + : "+a" (ax) + : : "ebx", "ecx", "edx", "esi", "edi"); do_restore = 1; /* Assume video contents were lost */ - - /* Not all BIOSes are clean with the top bit */ - new_mode = ireg.al & 0x7f; + new_mode = ax & 0x7f; /* Not all BIOSes are clean with the top bit */ if (new_mode == mode) return 0; /* Mode change OK */ @@ -54,8 +53,10 @@ static int set_bios_mode(u8 mode) /* Mode setting failed, but we didn't end up where we started. That's bad. Try to revert to the original video mode. */ - ireg.ax = boot_params.screen_info.orig_video_mode; - intcall(0x10, &ireg, NULL); + ax = boot_params.screen_info.orig_video_mode; + asm volatile(INT10 + : "+a" (ax) + : : "ebx", "ecx", "edx", "esi", "edi"); } #endif return -1; diff --git a/trunk/arch/x86/boot/video-vesa.c b/trunk/arch/x86/boot/video-vesa.c index c700147d6ffb..4a58c8ce3f69 100644 --- a/trunk/arch/x86/boot/video-vesa.c +++ b/trunk/arch/x86/boot/video-vesa.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
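The vesa_probe() hunk that follows tests (vminfo.mode_attr & 0x15) == 0x05 without spelling out what the constants mean. Here is a short sketch, with illustrative names that are not taken from the patch, of how that mask decodes under the VBE mode-attribute bit layout (bit 0: mode supported by hardware, bit 2: BIOS TTY output works in the mode, bit 4: graphics rather than text):

        #define VBE_ATTR_SUPPORTED      0x01    /* mode supported by hardware */
        #define VBE_ATTR_TTY            0x04    /* BIOS TTY output available */
        #define VBE_ATTR_GRAPHICS       0x10    /* graphics mode (clear = text) */

        /* 0x15 selects the three bits above; 0x05 means a supported text mode */
        static int vbe_mode_is_usable_text(u16 mode_attr)
        {
                u16 mask = VBE_ATTR_SUPPORTED | VBE_ATTR_TTY | VBE_ATTR_GRAPHICS;

                return (mode_attr & mask) ==
                       (VBE_ATTR_SUPPORTED | VBE_ATTR_TTY);
        }

vesa_probe() uses exactly this test to decide which VESA modes are worth offering as text modes in the mode menu.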
@@ -32,7 +31,7 @@ static inline void vesa_store_mode_params_graphics(void) {} static int vesa_probe(void) { #if defined(CONFIG_VIDEO_VESA) || defined(CONFIG_FIRMWARE_EDID) - struct biosregs ireg, oreg; + u16 ax, cx, di; u16 mode; addr_t mode_ptr; struct mode_info *mi; @@ -40,12 +39,13 @@ static int vesa_probe(void) video_vesa.modes = GET_HEAP(struct mode_info, 0); - initregs(&ireg); - ireg.ax = 0x4f00; - ireg.di = (size_t)&vginfo; - intcall(0x10, &ireg, &oreg); + ax = 0x4f00; + di = (size_t)&vginfo; + asm(INT10 + : "+a" (ax), "+D" (di), "=m" (vginfo) + : : "ebx", "ecx", "edx", "esi"); - if (ireg.ax != 0x004f || + if (ax != 0x004f || vginfo.signature != VESA_MAGIC || vginfo.version < 0x0102) return 0; /* Not present */ @@ -65,12 +65,14 @@ static int vesa_probe(void) memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ - ireg.ax = 0x4f01; - ireg.cx = mode; - ireg.di = (size_t)&vminfo; - intcall(0x10, &ireg, &oreg); + ax = 0x4f01; + cx = mode; + di = (size_t)&vminfo; + asm(INT10 + : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo) + : : "ebx", "edx", "esi"); - if (ireg.ax != 0x004f) + if (ax != 0x004f) continue; if ((vminfo.mode_attr & 0x15) == 0x05) { @@ -109,19 +111,20 @@ static int vesa_probe(void) static int vesa_set_mode(struct mode_info *mode) { - struct biosregs ireg, oreg; + u16 ax, bx, cx, di; int is_graphic; u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA; memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ - initregs(&ireg); - ireg.ax = 0x4f01; - ireg.cx = vesa_mode; - ireg.di = (size_t)&vminfo; - intcall(0x10, &ireg, &oreg); + ax = 0x4f01; + cx = vesa_mode; + di = (size_t)&vminfo; + asm(INT10 + : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo) + : : "ebx", "edx", "esi"); - if (oreg.ax != 0x004f) + if (ax != 0x004f) return -1; if ((vminfo.mode_attr & 0x15) == 0x05) { @@ -138,12 +141,14 @@ static int vesa_set_mode(struct mode_info *mode) } - initregs(&ireg); - ireg.ax = 0x4f02; - ireg.bx = vesa_mode; - intcall(0x10, &ireg, &oreg); + ax = 0x4f02; + bx = vesa_mode; + di = 0; + asm volatile(INT10 + : "+a" (ax), "+b" (bx), "+D" (di) + : : "ecx", "edx", "esi"); - if (oreg.ax != 0x004f) + if (ax != 0x004f) return -1; graphic_mode = is_graphic; @@ -166,45 +171,50 @@ static int vesa_set_mode(struct mode_info *mode) /* Switch DAC to 8-bit mode */ static void vesa_dac_set_8bits(void) { - struct biosregs ireg, oreg; u8 dac_size = 6; /* If possible, switch the DAC to 8-bit mode */ if (vginfo.capabilities & 1) { - initregs(&ireg); - ireg.ax = 0x4f08; - ireg.bh = 0x08; - intcall(0x10, &ireg, &oreg); - if (oreg.ax == 0x004f) - dac_size = oreg.bh; + u16 ax, bx; + + ax = 0x4f08; + bx = 0x0800; + asm volatile(INT10 + : "+a" (ax), "+b" (bx) + : : "ecx", "edx", "esi", "edi"); + + if (ax == 0x004f) + dac_size = bx >> 8; } /* Set the color sizes to the DAC size, and offsets to 0 */ - boot_params.screen_info.red_size = dac_size; + boot_params.screen_info.red_size = dac_size; boot_params.screen_info.green_size = dac_size; - boot_params.screen_info.blue_size = dac_size; - boot_params.screen_info.rsvd_size = dac_size; + boot_params.screen_info.blue_size = dac_size; + boot_params.screen_info.rsvd_size = dac_size; - boot_params.screen_info.red_pos = 0; - boot_params.screen_info.green_pos = 0; - boot_params.screen_info.blue_pos = 0; - boot_params.screen_info.rsvd_pos = 0; + boot_params.screen_info.red_pos = 0; + boot_params.screen_info.green_pos = 0; + boot_params.screen_info.blue_pos = 0; + boot_params.screen_info.rsvd_pos = 0; } /* Save the VESA protected mode info */ static void vesa_store_pm_info(void) 
{ - struct biosregs ireg, oreg; + u16 ax, bx, di, es; - initregs(&ireg); - ireg.ax = 0x4f0a; - intcall(0x10, &ireg, &oreg); + ax = 0x4f0a; + bx = di = 0; + asm("pushw %%es; "INT10"; movw %%es,%0; popw %%es" + : "=d" (es), "+a" (ax), "+b" (bx), "+D" (di) + : : "ecx", "esi"); - if (oreg.ax != 0x004f) + if (ax != 0x004f) return; - boot_params.screen_info.vesapm_seg = oreg.es; - boot_params.screen_info.vesapm_off = oreg.di; + boot_params.screen_info.vesapm_seg = es; + boot_params.screen_info.vesapm_off = di; } /* @@ -242,7 +252,7 @@ static void vesa_store_mode_params_graphics(void) void vesa_store_edid(void) { #ifdef CONFIG_FIRMWARE_EDID - struct biosregs ireg, oreg; + u16 ax, bx, cx, dx, di; /* Apparently used as a nonsense token... */ memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info); @@ -250,26 +260,33 @@ void vesa_store_edid(void) if (vginfo.version < 0x0200) return; /* EDID requires VBE 2.0+ */ - initregs(&ireg); - ireg.ax = 0x4f15; /* VBE DDC */ - /* ireg.bx = 0x0000; */ /* Report DDC capabilities */ - /* ireg.cx = 0; */ /* Controller 0 */ - ireg.es = 0; /* ES:DI must be 0 by spec */ - intcall(0x10, &ireg, &oreg); + ax = 0x4f15; /* VBE DDC */ + bx = 0x0000; /* Report DDC capabilities */ + cx = 0; /* Controller 0 */ + di = 0; /* ES:DI must be 0 by spec */ + + /* Note: The VBE DDC spec is different from the main VESA spec; + we genuinely have to assume all registers are destroyed here. */ + + asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" + : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di) + : : "esi", "edx"); - if (oreg.ax != 0x004f) + if (ax != 0x004f) return; /* No EDID */ /* BH = time in seconds to transfer EDD information */ /* BL = DDC level supported */ - ireg.ax = 0x4f15; /* VBE DDC */ - ireg.bx = 0x0001; /* Read EDID */ - /* ireg.cx = 0; */ /* Controller 0 */ - /* ireg.dx = 0; */ /* EDID block number */ - ireg.es = ds(); - ireg.di =(size_t)&boot_params.edid_info; /* (ES:)Pointer to block */ - intcall(0x10, &ireg, &oreg); + ax = 0x4f15; /* VBE DDC */ + bx = 0x0001; /* Read EDID */ + cx = 0; /* Controller 0 */ + dx = 0; /* EDID block number */ + di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ + asm(INT10 + : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info), + "+c" (cx), "+D" (di) + : : "esi"); #endif /* CONFIG_FIRMWARE_EDID */ } diff --git a/trunk/arch/x86/boot/video-vga.c b/trunk/arch/x86/boot/video-vga.c index 8f8d827e254d..9e0587a37768 100644 --- a/trunk/arch/x86/boot/video-vga.c +++ b/trunk/arch/x86/boot/video-vga.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. 
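vesa_store_edid() above fills boot_params.edid_info with 0x13 as a nonsense token and then asks the VBE DDC service to overwrite it, but the boot code itself does not validate what came back. A small sketch, not part of the patch and with an invented helper name, of the usual sanity check: a base EDID block is 128 bytes whose sum is 0 modulo 256, so an untouched 0x13 fill (which sums to 0x80) fails it:

        static int edid_block_checksum_ok(const void *block)
        {
                const u8 *p = block;
                u8 sum = 0;
                int i;

                for (i = 0; i < 128; i++)
                        sum += p[i];

                return sum == 0;        /* includes the checksum byte itself */
        }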
@@ -40,30 +39,30 @@ static __videocard video_vga; /* Set basic 80x25 mode */ static u8 vga_set_basic_mode(void) { - struct biosregs ireg, oreg; u16 ax; u8 rows; u8 mode; - initregs(&ireg); - #ifdef CONFIG_VIDEO_400_HACK if (adapter >= ADAPTER_VGA) { - ireg.ax = 0x1202; - ireg.bx = 0x0030; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 + : : "a" (0x1202), "b" (0x0030) + : "ecx", "edx", "esi", "edi"); } #endif ax = 0x0f00; - intcall(0x10, &ireg, &oreg); - mode = oreg.al; + asm volatile(INT10 + : "+a" (ax) + : : "ebx", "ecx", "edx", "esi", "edi"); + + mode = (u8)ax; set_fs(0); rows = rdfs8(0x484); /* rows minus one */ #ifndef CONFIG_VIDEO_400_HACK - if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) && + if ((ax == 0x5003 || ax == 0x5007) && (rows == 0 || rows == 24)) return mode; #endif @@ -72,8 +71,10 @@ static u8 vga_set_basic_mode(void) mode = 3; /* Set the mode */ - ireg.ax = mode; /* AH=0: set mode */ - intcall(0x10, &ireg, NULL); + ax = mode; + asm volatile(INT10 + : "+a" (ax) + : : "ebx", "ecx", "edx", "esi", "edi"); do_restore = 1; return mode; } @@ -81,69 +82,43 @@ static u8 vga_set_basic_mode(void) static void vga_set_8font(void) { /* Set 8x8 font - 80x43 on EGA, 80x50 on VGA */ - struct biosregs ireg; - - initregs(&ireg); /* Set 8x8 font */ - ireg.ax = 0x1112; - /* ireg.bl = 0; */ - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1112), "b" (0)); /* Use alternate print screen */ - ireg.ax = 0x1200; - ireg.bl = 0x20; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1200), "b" (0x20)); /* Turn off cursor emulation */ - ireg.ax = 0x1201; - ireg.bl = 0x34; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1201), "b" (0x34)); /* Cursor is scan lines 6-7 */ - ireg.ax = 0x0100; - ireg.cx = 0x0607; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x0100), "c" (0x0607)); } static void vga_set_14font(void) { /* Set 9x14 font - 80x28 on VGA */ - struct biosregs ireg; - - initregs(&ireg); /* Set 9x14 font */ - ireg.ax = 0x1111; - /* ireg.bl = 0; */ - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1111), "b" (0)); /* Turn off cursor emulation */ - ireg.ax = 0x1201; - ireg.bl = 0x34; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1201), "b" (0x34)); /* Cursor is scan lines 11-12 */ - ireg.ax = 0x0100; - ireg.cx = 0x0b0c; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x0100), "c" (0x0b0c)); } static void vga_set_80x43(void) { /* Set 80x43 mode on VGA (not EGA) */ - struct biosregs ireg; - - initregs(&ireg); /* Set 350 scans */ - ireg.ax = 0x1201; - ireg.bl = 0x30; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x1201), "b" (0x30)); /* Reset video mode */ - ireg.ax = 0x0003; - intcall(0x10, &ireg, NULL); + asm volatile(INT10 : : "a" (0x0003)); vga_set_8font(); } @@ -250,6 +225,8 @@ static int vga_set_mode(struct mode_info *mode) */ static int vga_probe(void) { + u16 ega_bx; + static const char *card_name[] = { "CGA/MDA/HGC", "EGA", "VGA" }; @@ -263,26 +240,26 @@ static int vga_probe(void) sizeof(ega_modes)/sizeof(struct mode_info), sizeof(vga_modes)/sizeof(struct mode_info), }; + u8 vga_flag; - struct biosregs ireg, oreg; - - initregs(&ireg); - - ireg.ax = 0x1200; - ireg.bl = 0x10; /* Check EGA/VGA */ - intcall(0x10, &ireg, &oreg); + asm(INT10 + : "=b" (ega_bx) + : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */ + : "ecx", "edx", "esi", "edi"); #ifndef _WAKEUP - boot_params.screen_info.orig_video_ega_bx = oreg.bx; + boot_params.screen_info.orig_video_ega_bx = ega_bx; #endif /* If we have MDA/CGA/HGC 
then BL will be unchanged at 0x10 */ - if (oreg.bl != 0x10) { + if ((u8)ega_bx != 0x10) { /* EGA/VGA */ - ireg.ax = 0x1a00; - intcall(0x10, &ireg, &oreg); + asm(INT10 + : "=a" (vga_flag) + : "a" (0x1a00) + : "ebx", "ecx", "edx", "esi", "edi"); - if (oreg.al == 0x1a) { + if (vga_flag == 0x1a) { adapter = ADAPTER_VGA; #ifndef _WAKEUP boot_params.screen_info.orig_video_isVGA = 1; diff --git a/trunk/arch/x86/boot/video.c b/trunk/arch/x86/boot/video.c index bad728b76fc2..3bef2c1febe9 100644 --- a/trunk/arch/x86/boot/video.c +++ b/trunk/arch/x86/boot/video.c @@ -2,7 +2,6 @@ * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved - * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. @@ -19,29 +18,33 @@ static void store_cursor_position(void) { - struct biosregs ireg, oreg; + u16 curpos; + u16 ax, bx; - initregs(&ireg); - ireg.ah = 0x03; - intcall(0x10, &ireg, &oreg); + ax = 0x0300; + bx = 0; + asm(INT10 + : "=d" (curpos), "+a" (ax), "+b" (bx) + : : "ecx", "esi", "edi"); - boot_params.screen_info.orig_x = oreg.dl; - boot_params.screen_info.orig_y = oreg.dh; + boot_params.screen_info.orig_x = curpos; + boot_params.screen_info.orig_y = curpos >> 8; } static void store_video_mode(void) { - struct biosregs ireg, oreg; + u16 ax, page; /* N.B.: the saving of the video page here is a bit silly, since we pretty much assume page 0 everywhere. */ - initregs(&ireg); - ireg.ah = 0x0f; - intcall(0x10, &ireg, &oreg); + ax = 0x0f00; + asm(INT10 + : "+a" (ax), "=b" (page) + : : "ecx", "edx", "esi", "edi"); /* Not all BIOSes are clean with respect to the top bit */ - boot_params.screen_info.orig_video_mode = oreg.al & 0x7f; - boot_params.screen_info.orig_video_page = oreg.bh; + boot_params.screen_info.orig_video_mode = ax & 0x7f; + boot_params.screen_info.orig_video_page = page >> 8; } /* @@ -254,7 +257,7 @@ static void restore_screen(void) int y; addr_t dst = 0; u16 *src = saved.data; - struct biosregs ireg; + u16 ax, bx, dx; if (graphic_mode) return; /* Can't restore onto a graphic mode */ @@ -293,11 +296,12 @@ static void restore_screen(void) } /* Restore cursor position */ - initregs(&ireg); - ireg.ah = 0x02; /* Set cursor position */ - ireg.dh = saved.cury; - ireg.dl = saved.curx; - intcall(0x10, &ireg, NULL); + ax = 0x0200; /* Set cursor position */ + bx = 0; /* Page number (<< 8) */ + dx = (saved.cury << 8)+saved.curx; + asm volatile(INT10 + : "+a" (ax), "+b" (bx), "+d" (dx) + : : "ecx", "esi", "edi"); } #else #define save_screen() ((void)0) diff --git a/trunk/arch/x86/boot/video.h b/trunk/arch/x86/boot/video.h index 5bb174a997fc..ee63f5d14461 100644 --- a/trunk/arch/x86/boot/video.h +++ b/trunk/arch/x86/boot/video.h @@ -112,6 +112,20 @@ extern int force_x, force_y; /* Don't query the BIOS for cols/rows */ extern int do_restore; /* Restore screen contents */ extern int graphic_mode; /* Graphics mode with linear frame buffer */ +/* + * int $0x10 is notorious for touching registers it shouldn't. + * gcc doesn't like %ebp being clobbered, so define it as a push/pop + * sequence here. + * + * A number of systems, including the original PC can clobber %bp in + * certain circumstances, like when scrolling. 
There exists at least + * one Trident video card which could clobber DS under a set of + * circumstances that we are unlikely to encounter (scrolling when + * using an extended graphics mode of more than 800x600 pixels), but + * it's cheap insurance to deal with that here. + */ +#define INT10 "pushl %%ebp; pushw %%ds; int $0x10; popw %%ds; popl %%ebp" + /* Accessing VGA indexed registers */ static inline u8 in_idx(u16 port, u8 index) { diff --git a/trunk/arch/x86/configs/i386_defconfig b/trunk/arch/x86/configs/i386_defconfig index edb992ebef92..235b81d0f6f2 100644 --- a/trunk/arch/x86/configs/i386_defconfig +++ b/trunk/arch/x86/configs/i386_defconfig @@ -1,13 +1,12 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.30-rc2 -# Mon May 11 16:21:55 2009 +# Linux kernel version: 2.6.29-rc4 +# Tue Feb 24 15:50:58 2009 # # CONFIG_64BIT is not set CONFIG_X86_32=y # CONFIG_X86_64 is not set CONFIG_X86=y -CONFIG_OUTPUT_FORMAT="elf32-i386" CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y @@ -34,7 +33,6 @@ CONFIG_ARCH_HAS_CPU_RELAX=y CONFIG_ARCH_HAS_DEFAULT_IDLE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y # CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -42,16 +40,15 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ARCH_POPULATES_NODE_MAP=y # CONFIG_AUDIT_ARCH is not set CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_X86_SMP=y CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_X86_32_SMP=y CONFIG_X86_HT=y +CONFIG_X86_BIOS_REBOOT=y CONFIG_X86_TRAMPOLINE=y -CONFIG_X86_32_LAZY_GS=y CONFIG_KTIME_SCALAR=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -63,17 +60,10 @@ CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set CONFIG_TASKSTATS=y @@ -123,26 +113,23 @@ CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y # CONFIG_EMBEDDED is not set CONFIG_UID16=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_EXTRA_PASS=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_PCSPKR_PLATFORM=y +# CONFIG_COMPAT_BRK is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y +CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y @@ -152,7 +139,6 @@ CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set @@ -168,8 +154,6 @@ CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_API_DEBUG=y -# CONFIG_SLOW_WORK is not set CONFIG_HAVE_GENERIC_DMA_COHERENT=y CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y @@ -183,6 +167,7 @@ 
CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y # CONFIG_LBD is not set +CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_INTEGRITY is not set @@ -209,12 +194,12 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y CONFIG_SPARSE_IRQ=y +CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y -# CONFIG_X86_BIGSMP is not set -CONFIG_X86_EXTENDED_PLATFORM=y # CONFIG_X86_ELAN is not set +# CONFIG_X86_GENERICARCH is not set +# CONFIG_X86_VSMP is not set # CONFIG_X86_RDC321X is not set -# CONFIG_X86_32_NON_STANDARD is not set CONFIG_SCHED_OMIT_FRAME_POINTER=y # CONFIG_PARAVIRT_GUEST is not set # CONFIG_MEMTEST is not set @@ -245,10 +230,8 @@ CONFIG_M686=y # CONFIG_GENERIC_CPU is not set CONFIG_X86_GENERIC=y CONFIG_X86_CPU=y -CONFIG_X86_L1_CACHE_BYTES=64 -CONFIG_X86_INTERNODE_CACHE_BYTES=64 CONFIG_X86_CMPXCHG=y -CONFIG_X86_L1_CACHE_SHIFT=5 +CONFIG_X86_L1_CACHE_SHIFT=7 CONFIG_X86_XADD=y # CONFIG_X86_PPRO_FENCE is not set CONFIG_X86_WP_WORKS_OK=y @@ -264,7 +247,7 @@ CONFIG_X86_DEBUGCTLMSR=y CONFIG_CPU_SUP_INTEL=y CONFIG_CPU_SUP_CYRIX_32=y CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_CENTAUR_32=y CONFIG_CPU_SUP_TRANSMETA_32=y CONFIG_CPU_SUP_UMC_32=y CONFIG_X86_DS=y @@ -296,7 +279,6 @@ CONFIG_MICROCODE_AMD=y CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=y -# CONFIG_X86_CPU_DEBUG is not set # CONFIG_NOHIGHMEM is not set CONFIG_HIGHMEM4G=y # CONFIG_HIGHMEM64G is not set @@ -320,8 +302,6 @@ CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y CONFIG_UNEVICTABLE_LRU=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y CONFIG_HIGHPTE=y CONFIG_X86_CHECK_BIOS_CORRUPTION=y CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y @@ -332,7 +312,6 @@ CONFIG_MTRR=y CONFIG_X86_PAT=y CONFIG_EFI=y CONFIG_SECCOMP=y -# CONFIG_CC_STACKPROTECTOR is not set # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_300 is not set @@ -343,9 +322,8 @@ CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y # CONFIG_KEXEC_JUMP is not set CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x1000000 +# CONFIG_RELOCATABLE is not set +CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set # CONFIG_CMDLINE_BOOL is not set @@ -385,6 +363,7 @@ CONFIG_ACPI_THERMAL=y CONFIG_ACPI_BLACKLIST_YEAR=0 # CONFIG_ACPI_DEBUG is not set # CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_SYSTEM=y CONFIG_X86_PM_TIMER=y CONFIG_ACPI_CONTAINER=y # CONFIG_ACPI_SBS is not set @@ -446,7 +425,6 @@ CONFIG_PCI_BIOS=y CONFIG_PCI_DIRECT=y CONFIG_PCI_MMCONFIG=y CONFIG_PCI_DOMAINS=y -# CONFIG_DMAR is not set CONFIG_PCIEPORTBUS=y # CONFIG_HOTPLUG_PCI_PCIE is not set CONFIG_PCIEAER=y @@ -457,7 +435,6 @@ CONFIG_PCI_MSI=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_STUB is not set CONFIG_HT_IRQ=y -# CONFIG_PCI_IOV is not set CONFIG_ISA_DMA_API=y # CONFIG_ISA is not set # CONFIG_MCA is not set @@ -504,6 +481,7 @@ CONFIG_NET=y # # Networking options # +CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -661,7 +639,6 @@ CONFIG_LLC=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set CONFIG_NET_SCHED=y # @@ -719,7 +696,6 @@ CONFIG_NET_SCH_FIFO=y # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set -# CONFIG_NET_DROP_MONITOR is not set CONFIG_HAMRADIO=y # @@ -730,10 +706,12 @@ CONFIG_HAMRADIO=y # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set CONFIG_FIB_RULES=y 
CONFIG_WIRELESS=y CONFIG_CFG80211=y # CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_NL80211=y CONFIG_WIRELESS_OLD_REGULATORY=y CONFIG_WIRELESS_EXT=y CONFIG_WIRELESS_EXT_SYSFS=y @@ -811,7 +789,6 @@ CONFIG_MISC_DEVICES=y # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_HP_ILO is not set -# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set # @@ -865,7 +842,6 @@ CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=y # CONFIG_ATA_NONSTANDARD is not set CONFIG_ATA_ACPI=y @@ -964,7 +940,6 @@ CONFIG_DM_ZERO=y CONFIG_MACINTOSH_DRIVERS=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y -CONFIG_COMPAT_NET_DEV_OPS=y # CONFIG_IFB is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set @@ -1002,8 +977,6 @@ CONFIG_MII=y CONFIG_NET_VENDOR_3COM=y # CONFIG_VORTEX is not set # CONFIG_TYPHOON is not set -# CONFIG_ETHOC is not set -# CONFIG_DNET is not set CONFIG_NET_TULIP=y # CONFIG_DE2104X is not set # CONFIG_TULIP is not set @@ -1053,7 +1026,6 @@ CONFIG_E1000=y CONFIG_E1000E=y # CONFIG_IP1000 is not set # CONFIG_IGB is not set -# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -1068,7 +1040,6 @@ CONFIG_BNX2=y # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set # CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set @@ -1078,7 +1049,6 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set -# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set @@ -1088,7 +1058,6 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_BNX2X is not set # CONFIG_QLGE is not set # CONFIG_SFC is not set -# CONFIG_BE2NET is not set CONFIG_TR=y # CONFIG_IBMOL is not set # CONFIG_IBMLS is not set @@ -1104,8 +1073,8 @@ CONFIG_WLAN_80211=y # CONFIG_LIBERTAS is not set # CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_AIRO is not set +# CONFIG_HERMES is not set # CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set # CONFIG_AIRO_CS is not set # CONFIG_PCMCIA_WL3501 is not set # CONFIG_PRISM54 is not set @@ -1115,21 +1084,21 @@ CONFIG_WLAN_80211=y # CONFIG_RTL8187 is not set # CONFIG_ADM8211 is not set # CONFIG_MAC80211_HWSIM is not set -# CONFIG_MWL8K is not set # CONFIG_P54_COMMON is not set CONFIG_ATH5K=y # CONFIG_ATH5K_DEBUG is not set # CONFIG_ATH9K is not set -# CONFIG_AR9170_USB is not set # CONFIG_IPW2100 is not set # CONFIG_IPW2200 is not set -# CONFIG_IWLWIFI is not set +# CONFIG_IWLCORE is not set +# CONFIG_IWLWIFI_LEDS is not set +# CONFIG_IWLAGN is not set +# CONFIG_IWL3945 is not set # CONFIG_HOSTAP is not set # CONFIG_B43 is not set # CONFIG_B43LEGACY is not set # CONFIG_ZD1211RW is not set # CONFIG_RT2X00 is not set -# CONFIG_HERMES is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -1240,8 +1209,6 @@ CONFIG_INPUT_TABLET=y # CONFIG_TABLET_USB_KBTAB is not set # CONFIG_TABLET_USB_WACOM is not set CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_AD7879_I2C is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set @@ -1336,7 +1303,6 @@ CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set CONFIG_HW_RANDOM_INTEL=y 
CONFIG_HW_RANDOM_AMD=y CONFIG_HW_RANDOM_GEODE=y @@ -1424,6 +1390,7 @@ CONFIG_I2C_I801=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set @@ -1457,7 +1424,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_K8TEMP is not set # CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ATK0110 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_I5K_AMB is not set @@ -1467,7 +1433,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_FSCHER is not set # CONFIG_SENSORS_FSCPOS is not set # CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_CORETEMP is not set @@ -1483,14 +1448,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LTC4215 is not set # CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set @@ -1681,6 +1643,7 @@ CONFIG_FB_EFI=y # CONFIG_FB_3DFX is not set # CONFIG_FB_VOODOO1 is not set # CONFIG_FB_VT8623 is not set +# CONFIG_FB_CYBLA is not set # CONFIG_FB_TRIDENT is not set # CONFIG_FB_ARK is not set # CONFIG_FB_PM3 is not set @@ -1689,7 +1652,6 @@ CONFIG_FB_EFI=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y @@ -1776,8 +1738,6 @@ CONFIG_SND_PCI=y # CONFIG_SND_INDIGO is not set # CONFIG_SND_INDIGOIO is not set # CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set # CONFIG_SND_EMU10K1 is not set # CONFIG_SND_EMU10K1X is not set # CONFIG_SND_ENS1370 is not set @@ -1851,17 +1811,15 @@ CONFIG_USB_HIDDEV=y # # Special HID drivers # +CONFIG_HID_COMPAT=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -# CONFIG_DRAGONRISE_FF is not set CONFIG_HID_EZKEY=y -CONFIG_HID_KYE=y CONFIG_HID_GYRATION=y -CONFIG_HID_KENSINGTON=y CONFIG_HID_LOGITECH=y CONFIG_LOGITECH_FF=y # CONFIG_LOGIRUMBLEPAD2_FF is not set @@ -1927,11 +1885,11 @@ CONFIG_USB_PRINTER=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# also be needed; see USB_STORAGE Help for more info +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set @@ -1973,6 +1931,7 @@ CONFIG_USB_LIBUSUAL=y # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set @@ -1988,7 +1947,6 @@ CONFIG_USB_LIBUSUAL=y # # OTG and related infrastructure # -# CONFIG_NOP_USB_XCEIV is not set # CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -2000,10 +1958,8 @@ CONFIG_LEDS_CLASS=y # # 
CONFIG_LEDS_ALIX2 is not set # CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_LP5521 is not set # CONFIG_LEDS_CLEVO_MAIL is not set # CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -2013,10 +1969,6 @@ CONFIG_LEDS_TRIGGERS=y # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set CONFIG_EDAC=y @@ -2085,7 +2037,6 @@ CONFIG_DMADEVICES=y # DMA Devices # # CONFIG_INTEL_IOATDMA is not set -# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set CONFIG_X86_PLATFORM_DEVICES=y @@ -2120,7 +2071,6 @@ CONFIG_DMIID=y # # CONFIG_EXT2_FS is not set CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y @@ -2150,11 +2100,6 @@ CONFIG_AUTOFS4_FS=y # CONFIG_FUSE_FS is not set CONFIG_GENERIC_ACL=y -# -# Caches -# -# CONFIG_FSCACHE is not set - # # CD-ROM/DVD Filesystems # @@ -2206,7 +2151,6 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y @@ -2220,6 +2164,7 @@ CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -2306,7 +2251,6 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set # CONFIG_DETECT_SOFTLOCKUP is not set -# CONFIG_DETECT_HUNG_TASK is not set # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y @@ -2322,7 +2266,6 @@ CONFIG_TIMER_STATS=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_HIGHMEM is not set CONFIG_DEBUG_BUGVERBOSE=y @@ -2346,19 +2289,13 @@ CONFIG_FRAME_POINTER=y # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_HW_BRANCH_TRACER=y -CONFIG_HAVE_FTRACE_SYSCALLS=y -CONFIG_RING_BUFFER=y -CONFIG_TRACING=y -CONFIG_TRACING_SUPPORT=y # # Tracers @@ -2368,21 +2305,13 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_SYSPROF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_EVENT_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set # CONFIG_BOOT_TRACER is not set # CONFIG_TRACE_BRANCH_PROFILING is not set # CONFIG_POWER_TRACER is not set # CONFIG_STACK_TRACER is not set # CONFIG_HW_BRANCH_TRACER is not set -# CONFIG_KMEMTRACE is not set -# CONFIG_WORKQUEUE_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -# CONFIG_DYNAMIC_DEBUG is not set -# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -2392,6 +2321,7 @@ CONFIG_EARLY_PRINTK=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y 
CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_DEBUG_PER_CPU_MAPS is not set # CONFIG_X86_PTDUMP is not set CONFIG_DEBUG_RODATA=y @@ -2399,7 +2329,7 @@ CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_NX_TEST=m # CONFIG_4KSTACKS is not set CONFIG_DOUBLEFAULT=y -CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_MMIOTRACE is not set CONFIG_IO_DELAY_TYPE_0X80=0 CONFIG_IO_DELAY_TYPE_0XED=1 CONFIG_IO_DELAY_TYPE_UDELAY=2 @@ -2435,8 +2365,6 @@ CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set # CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_IMA is not set CONFIG_CRYPTO=y # @@ -2452,12 +2380,10 @@ CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set -CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=y # CONFIG_CRYPTO_TEST is not set @@ -2530,7 +2456,6 @@ CONFIG_CRYPTO_DES=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -2542,13 +2467,11 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_GEODE is not set # CONFIG_CRYPTO_DEV_HIFN_795X is not set CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y CONFIG_VIRTUALIZATION=y # CONFIG_KVM is not set # CONFIG_LGUEST is not set # CONFIG_VIRTIO_PCI is not set # CONFIG_VIRTIO_BALLOON is not set -CONFIG_BINARY_PRINTF=y # # Library routines @@ -2566,10 +2489,7 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_AUDIT_GENERIC=y CONFIG_ZLIB_INFLATE=y -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y +CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y -CONFIG_NLATTR=y diff --git a/trunk/arch/x86/configs/x86_64_defconfig b/trunk/arch/x86/configs/x86_64_defconfig index cee1dd2e69b2..9fe5d212ab4c 100644 --- a/trunk/arch/x86/configs/x86_64_defconfig +++ b/trunk/arch/x86/configs/x86_64_defconfig @@ -1,13 +1,12 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.30-rc2 -# Mon May 11 16:22:00 2009 +# Linux kernel version: 2.6.29-rc4 +# Tue Feb 24 15:44:16 2009 # CONFIG_64BIT=y # CONFIG_X86_32 is not set CONFIG_X86_64=y CONFIG_X86=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CMOS_UPDATE=y @@ -35,7 +34,6 @@ CONFIG_ARCH_HAS_CPU_RELAX=y CONFIG_ARCH_HAS_DEFAULT_IDLE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -43,14 +41,14 @@ CONFIG_ZONE_DMA32=y CONFIG_ARCH_POPULATES_NODE_MAP=y CONFIG_AUDIT_ARCH=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_X86_SMP=y CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_X86_64_SMP=y CONFIG_X86_HT=y +CONFIG_X86_BIOS_REBOOT=y CONFIG_X86_TRAMPOLINE=y # CONFIG_KTIME_SCALAR is not set CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -63,17 +61,10 @@ CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_KERNEL_GZIP=y -# 
CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set CONFIG_TASKSTATS=y @@ -123,26 +114,23 @@ CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y # CONFIG_EMBEDDED is not set CONFIG_UID16=y CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_EXTRA_PASS=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_PCSPKR_PLATFORM=y +# CONFIG_COMPAT_BRK is not set CONFIG_BASE_FULL=y CONFIG_FUTEX=y +CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y @@ -152,7 +140,6 @@ CONFIG_AIO=y CONFIG_VM_EVENT_COUNTERS=y CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y -# CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set @@ -168,8 +155,6 @@ CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_API_DEBUG=y -# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y @@ -182,6 +167,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_MODULE_SRCVERSION_ALL is not set CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y +CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_INTEGRITY is not set CONFIG_BLOCK_COMPAT=y @@ -209,10 +195,12 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_SMP=y CONFIG_SPARSE_IRQ=y +# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set +CONFIG_X86_FIND_SMP_CONFIG=y CONFIG_X86_MPPARSE=y -CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_ELAN is not set +# CONFIG_X86_GENERICARCH is not set # CONFIG_X86_VSMP is not set -# CONFIG_X86_UV is not set CONFIG_SCHED_OMIT_FRAME_POINTER=y # CONFIG_PARAVIRT_GUEST is not set # CONFIG_MEMTEST is not set @@ -242,10 +230,10 @@ CONFIG_SCHED_OMIT_FRAME_POINTER=y # CONFIG_MCORE2 is not set CONFIG_GENERIC_CPU=y CONFIG_X86_CPU=y -CONFIG_X86_L1_CACHE_BYTES=64 -CONFIG_X86_INTERNODE_CACHE_BYTES=64 +CONFIG_X86_L1_CACHE_BYTES=128 +CONFIG_X86_INTERNODE_CACHE_BYTES=128 CONFIG_X86_CMPXCHG=y -CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=7 CONFIG_X86_WP_WORKS_OK=y CONFIG_X86_TSC=y CONFIG_X86_CMPXCHG64=y @@ -254,7 +242,7 @@ CONFIG_X86_MINIMUM_CPU_FAMILY=64 CONFIG_X86_DEBUGCTLMSR=y CONFIG_CPU_SUP_INTEL=y CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_CENTAUR_64=y CONFIG_X86_DS=y CONFIG_X86_PTRACE_BTS=y CONFIG_HPET_TIMER=y @@ -281,7 +269,6 @@ CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y CONFIG_X86_MCE=y CONFIG_X86_MCE_INTEL=y CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y # CONFIG_I8K is not set CONFIG_MICROCODE=y CONFIG_MICROCODE_INTEL=y @@ -289,7 +276,6 @@ CONFIG_MICROCODE_AMD=y CONFIG_MICROCODE_OLD_INTERFACE=y CONFIG_X86_MSR=y CONFIG_X86_CPUID=y -# CONFIG_X86_CPU_DEBUG is not set CONFIG_ARCH_PHYS_ADDR_T_64BIT=y CONFIG_DIRECT_GBPAGES=y CONFIG_NUMA=y @@ -323,8 +309,6 @@ CONFIG_ZONE_DMA_FLAG=1 CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y CONFIG_UNEVICTABLE_LRU=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y CONFIG_X86_CHECK_BIOS_CORRUPTION=y CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y CONFIG_X86_RESERVE_LOW_64K=y @@ -333,7 +317,6 @@ CONFIG_MTRR=y CONFIG_X86_PAT=y CONFIG_EFI=y CONFIG_SECCOMP=y -# CONFIG_CC_STACKPROTECTOR is not set # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is 
not set # CONFIG_HZ_300 is not set @@ -342,10 +325,9 @@ CONFIG_HZ=1000 CONFIG_SCHED_HRTICK=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y -# CONFIG_KEXEC_JUMP is not set CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_PHYSICAL_ALIGN=0x1000000 +# CONFIG_RELOCATABLE is not set +CONFIG_PHYSICAL_ALIGN=0x200000 CONFIG_HOTPLUG_CPU=y # CONFIG_COMPAT_VDSO is not set # CONFIG_CMDLINE_BOOL is not set @@ -388,6 +370,7 @@ CONFIG_ACPI_NUMA=y CONFIG_ACPI_BLACKLIST_YEAR=0 # CONFIG_ACPI_DEBUG is not set # CONFIG_ACPI_PCI_SLOT is not set +CONFIG_ACPI_SYSTEM=y CONFIG_X86_PM_TIMER=y CONFIG_ACPI_CONTAINER=y # CONFIG_ACPI_SBS is not set @@ -453,7 +436,6 @@ CONFIG_PCI_MSI=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_STUB is not set CONFIG_HT_IRQ=y -# CONFIG_PCI_IOV is not set CONFIG_ISA_DMA_API=y CONFIG_K8_NB=y CONFIG_PCCARD=y @@ -499,6 +481,7 @@ CONFIG_NET=y # # Networking options # +CONFIG_COMPAT_NET_DEV_OPS=y CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -656,7 +639,6 @@ CONFIG_LLC=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set -# CONFIG_PHONET is not set CONFIG_NET_SCHED=y # @@ -714,7 +696,6 @@ CONFIG_NET_SCH_FIFO=y # # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set -# CONFIG_NET_DROP_MONITOR is not set CONFIG_HAMRADIO=y # @@ -725,10 +706,12 @@ CONFIG_HAMRADIO=y # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +# CONFIG_PHONET is not set CONFIG_FIB_RULES=y CONFIG_WIRELESS=y CONFIG_CFG80211=y # CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_NL80211=y CONFIG_WIRELESS_OLD_REGULATORY=y CONFIG_WIRELESS_EXT=y CONFIG_WIRELESS_EXT_SYSFS=y @@ -805,8 +788,9 @@ CONFIG_MISC_DEVICES=y # CONFIG_TIFM_CORE is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_SGI_XP is not set # CONFIG_HP_ILO is not set -# CONFIG_ISL29003 is not set +# CONFIG_SGI_GRU is not set # CONFIG_C2PORT is not set # @@ -860,7 +844,6 @@ CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set # CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=y # CONFIG_ATA_NONSTANDARD is not set CONFIG_ATA_ACPI=y @@ -957,7 +940,6 @@ CONFIG_DM_ZERO=y CONFIG_MACINTOSH_DRIVERS=y CONFIG_MAC_EMUMOUSEBTN=y CONFIG_NETDEVICES=y -CONFIG_COMPAT_NET_DEV_OPS=y # CONFIG_IFB is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set @@ -995,8 +977,6 @@ CONFIG_MII=y CONFIG_NET_VENDOR_3COM=y # CONFIG_VORTEX is not set # CONFIG_TYPHOON is not set -# CONFIG_ETHOC is not set -# CONFIG_DNET is not set CONFIG_NET_TULIP=y # CONFIG_DE2104X is not set # CONFIG_TULIP is not set @@ -1046,7 +1026,6 @@ CONFIG_E1000=y # CONFIG_E1000E is not set # CONFIG_IP1000 is not set # CONFIG_IGB is not set -# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -1061,7 +1040,6 @@ CONFIG_TIGON3=y # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set # CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set @@ -1071,7 +1049,6 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set -# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set @@ -1081,7 +1058,6 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_BNX2X is not set # CONFIG_QLGE is not set # CONFIG_SFC is not set -# CONFIG_BE2NET is not set CONFIG_TR=y # CONFIG_IBMOL is not set # CONFIG_3C359 is not set @@ -1096,8 +1072,8 @@ 
CONFIG_WLAN_80211=y # CONFIG_LIBERTAS is not set # CONFIG_LIBERTAS_THINFIRM is not set # CONFIG_AIRO is not set +# CONFIG_HERMES is not set # CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set # CONFIG_AIRO_CS is not set # CONFIG_PCMCIA_WL3501 is not set # CONFIG_PRISM54 is not set @@ -1107,21 +1083,21 @@ CONFIG_WLAN_80211=y # CONFIG_RTL8187 is not set # CONFIG_ADM8211 is not set # CONFIG_MAC80211_HWSIM is not set -# CONFIG_MWL8K is not set # CONFIG_P54_COMMON is not set CONFIG_ATH5K=y # CONFIG_ATH5K_DEBUG is not set # CONFIG_ATH9K is not set -# CONFIG_AR9170_USB is not set # CONFIG_IPW2100 is not set # CONFIG_IPW2200 is not set -# CONFIG_IWLWIFI is not set +# CONFIG_IWLCORE is not set +# CONFIG_IWLWIFI_LEDS is not set +# CONFIG_IWLAGN is not set +# CONFIG_IWL3945 is not set # CONFIG_HOSTAP is not set # CONFIG_B43 is not set # CONFIG_B43LEGACY is not set # CONFIG_ZD1211RW is not set # CONFIG_RT2X00 is not set -# CONFIG_HERMES is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -1232,8 +1208,6 @@ CONFIG_INPUT_TABLET=y # CONFIG_TABLET_USB_KBTAB is not set # CONFIG_TABLET_USB_WACOM is not set CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_AD7879_I2C is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set @@ -1327,7 +1301,6 @@ CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_HW_RANDOM_INTEL is not set # CONFIG_HW_RANDOM_AMD is not set CONFIG_NVRAM=y @@ -1409,6 +1382,7 @@ CONFIG_I2C_I801=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set @@ -1442,7 +1416,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_K8TEMP is not set # CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ATK0110 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_I5K_AMB is not set @@ -1452,7 +1425,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_FSCHER is not set # CONFIG_SENSORS_FSCPOS is not set # CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_CORETEMP is not set @@ -1468,14 +1440,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LTC4215 is not set # CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set @@ -1666,7 +1635,6 @@ CONFIG_FB_EFI=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y # CONFIG_LCD_CLASS_DEVICE is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y @@ -1752,8 +1720,6 @@ CONFIG_SND_PCI=y # CONFIG_SND_INDIGO is not set # CONFIG_SND_INDIGOIO is not set # CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set # CONFIG_SND_EMU10K1 is not set # CONFIG_SND_EMU10K1X is not set # 
CONFIG_SND_ENS1370 is not set @@ -1826,17 +1792,15 @@ CONFIG_USB_HIDDEV=y # # Special HID drivers # +CONFIG_HID_COMPAT=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -# CONFIG_DRAGONRISE_FF is not set CONFIG_HID_EZKEY=y -CONFIG_HID_KYE=y CONFIG_HID_GYRATION=y -CONFIG_HID_KENSINGTON=y CONFIG_HID_LOGITECH=y CONFIG_LOGITECH_FF=y # CONFIG_LOGIRUMBLEPAD2_FF is not set @@ -1902,11 +1866,11 @@ CONFIG_USB_PRINTER=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; # # -# also be needed; see USB_STORAGE Help for more info +# see USB_STORAGE Help for more information # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set @@ -1948,6 +1912,7 @@ CONFIG_USB_LIBUSUAL=y # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGET is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set @@ -1963,7 +1928,6 @@ CONFIG_USB_LIBUSUAL=y # # OTG and related infrastructure # -# CONFIG_NOP_USB_XCEIV is not set # CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -1975,10 +1939,8 @@ CONFIG_LEDS_CLASS=y # # CONFIG_LEDS_ALIX2 is not set # CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_LP5521 is not set # CONFIG_LEDS_CLEVO_MAIL is not set # CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_BD2802 is not set # # LED Triggers @@ -1988,10 +1950,6 @@ CONFIG_LEDS_TRIGGERS=y # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set # CONFIG_LEDS_TRIGGER_BACKLIGHT is not set # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set CONFIG_EDAC=y @@ -2060,7 +2018,6 @@ CONFIG_DMADEVICES=y # DMA Devices # # CONFIG_INTEL_IOATDMA is not set -# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set CONFIG_X86_PLATFORM_DEVICES=y @@ -2094,7 +2051,6 @@ CONFIG_DMIID=y # # CONFIG_EXT2_FS is not set CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y @@ -2125,11 +2081,6 @@ CONFIG_AUTOFS4_FS=y # CONFIG_FUSE_FS is not set CONFIG_GENERIC_ACL=y -# -# Caches -# -# CONFIG_FSCACHE is not set - # # CD-ROM/DVD Filesystems # @@ -2181,7 +2132,6 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y @@ -2195,6 +2145,7 @@ CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_REGISTER_V4 is not set CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -2281,7 +2232,6 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set # CONFIG_DETECT_SOFTLOCKUP is not set -# CONFIG_DETECT_HUNG_TASK is not set # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y @@ -2297,7 +2247,6 @@ CONFIG_TIMER_STATS=y # CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_STACKTRACE=y # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set @@ -2320,19 +2269,13 @@ CONFIG_FRAME_POINTER=y # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set 
CONFIG_SYSCTL_SYSCALL_CHECK=y -# CONFIG_DEBUG_PAGEALLOC is not set CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_HAVE_HW_BRANCH_TRACER=y -CONFIG_HAVE_FTRACE_SYSCALLS=y -CONFIG_RING_BUFFER=y -CONFIG_TRACING=y -CONFIG_TRACING_SUPPORT=y # # Tracers @@ -2342,21 +2285,13 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_SYSPROF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_EVENT_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set # CONFIG_BOOT_TRACER is not set # CONFIG_TRACE_BRANCH_PROFILING is not set # CONFIG_POWER_TRACER is not set # CONFIG_STACK_TRACER is not set # CONFIG_HW_BRANCH_TRACER is not set -# CONFIG_KMEMTRACE is not set -# CONFIG_WORKQUEUE_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -# CONFIG_DYNAMIC_DEBUG is not set -# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DYNAMIC_PRINTK_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -2366,13 +2301,14 @@ CONFIG_EARLY_PRINTK=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_DEBUG_PER_CPU_MAPS is not set # CONFIG_X86_PTDUMP is not set CONFIG_DEBUG_RODATA=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_NX_TEST=m # CONFIG_IOMMU_DEBUG is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y +# CONFIG_MMIOTRACE is not set CONFIG_IO_DELAY_TYPE_0X80=0 CONFIG_IO_DELAY_TYPE_0XED=1 CONFIG_IO_DELAY_TYPE_UDELAY=2 @@ -2408,8 +2344,6 @@ CONFIG_SECURITY_SELINUX_AVC_STATS=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set # CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_IMA is not set CONFIG_CRYPTO=y # @@ -2425,12 +2359,10 @@ CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_GF128MUL is not set # CONFIG_CRYPTO_NULL is not set -CONFIG_CRYPTO_WORKQUEUE=y # CONFIG_CRYPTO_CRYPTD is not set CONFIG_CRYPTO_AUTHENC=y # CONFIG_CRYPTO_TEST is not set @@ -2482,7 +2414,6 @@ CONFIG_CRYPTO_SHA1=y # CONFIG_CRYPTO_AES=y # CONFIG_CRYPTO_AES_X86_64 is not set -# CONFIG_CRYPTO_AES_NI_INTEL is not set # CONFIG_CRYPTO_ANUBIS is not set CONFIG_CRYPTO_ARC4=y # CONFIG_CRYPTO_BLOWFISH is not set @@ -2504,7 +2435,6 @@ CONFIG_CRYPTO_DES=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -2514,12 +2444,10 @@ CONFIG_CRYPTO_DES=y CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_HIFN_795X is not set CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y CONFIG_VIRTUALIZATION=y # CONFIG_KVM is not set # CONFIG_VIRTIO_PCI is not set # CONFIG_VIRTIO_BALLOON is not set -CONFIG_BINARY_PRINTF=y # # Library routines @@ -2536,10 +2464,7 @@ CONFIG_CRC32=y # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y +CONFIG_PLIST=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y -CONFIG_NLATTR=y diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 1a37bcdc8606..f6aa18eadf71 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ 
b/trunk/arch/x86/include/asm/alternative.h @@ -3,7 +3,6 @@ #include #include -#include #include /* @@ -75,22 +74,6 @@ static inline void alternatives_smp_switch(int smp) {} const unsigned char *const *find_nop_table(void); -/* alternative assembly primitive: */ -#define ALTERNATIVE(oldinstr, newinstr, feature) \ - \ - "661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - _ASM_ALIGN "\n" \ - _ASM_PTR "661b\n" /* label */ \ - _ASM_PTR "663f\n" /* new instruction */ \ - " .byte " __stringify(feature) "\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement, \"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" - /* * Alternative instructions for different CPU types or capabilities. * @@ -104,7 +87,18 @@ const unsigned char *const *find_nop_table(void); * without volatile and memory clobber. */ #define alternative(oldinstr, newinstr, feature) \ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") /* * Alternative inline assembly with input. @@ -115,16 +109,35 @@ const unsigned char *const *find_nop_table(void); * Best is to use constraints that are fixed size (like (%1) ... "r") * If you use variable sized constraints like "m" or "g" in the * replacement make sure to pad to the worst case length. - * Leaving an unused argument 0 to keep API compatibility. */ #define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ - : : "i" (0), ## input) + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) /* Like alternative_input, but with a single output argument */ #define alternative_io(oldinstr, newinstr, feature, output, input...) 
\ - asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ - : output : "i" (0), ## input) + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR "661b\n" /* label */ \ + _ASM_PTR "663f\n" /* new instruction */ \ + " .byte %c[feat]\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" : output : [feat] "i" (feature), ##input) /* * use this macro(s) if you need more than one output parameter diff --git a/trunk/arch/x86/include/asm/apic.h b/trunk/arch/x86/include/asm/apic.h index 229d0be184a2..42f2f8377422 100644 --- a/trunk/arch/x86/include/asm/apic.h +++ b/trunk/arch/x86/include/asm/apic.h @@ -107,7 +107,8 @@ extern u32 native_safe_apic_wait_icr_idle(void); extern void native_apic_icr_write(u32 low, u32 id); extern u64 native_apic_icr_read(void); -extern int x2apic_mode; +#define EIM_8BIT_APIC_ID 0 +#define EIM_32BIT_APIC_ID 1 #ifdef CONFIG_X86_X2APIC /* @@ -165,9 +166,10 @@ static inline u64 native_x2apic_icr_read(void) return val; } -extern int x2apic_phys; +extern int x2apic, x2apic_phys; extern void check_x2apic(void); extern void enable_x2apic(void); +extern void enable_IR_x2apic(void); extern void x2apic_icr_write(u32 low, u32 id); static inline int x2apic_enabled(void) { @@ -181,8 +183,6 @@ static inline int x2apic_enabled(void) return 1; return 0; } - -#define x2apic_supported() (cpu_has_x2apic) #else static inline void check_x2apic(void) { @@ -190,20 +190,28 @@ static inline void check_x2apic(void) static inline void enable_x2apic(void) { } +static inline void enable_IR_x2apic(void) +{ +} static inline int x2apic_enabled(void) { return 0; } -#define x2apic_preenabled 0 -#define x2apic_supported() 0 -#endif +#define x2apic 0 -extern void enable_IR_x2apic(void); +#endif extern int get_physical_broadcast(void); -extern void apic_disable(void); +#ifdef CONFIG_X86_X2APIC +static inline void ack_x2APIC_irq(void) +{ + /* Docs say use 0 for future compatibility */ + native_apic_msr_write(APIC_EOI, 0); +} +#endif + extern int lapic_get_maxlvt(void); extern void clear_local_APIC(void); extern void connect_bsp_APIC(void); @@ -244,7 +252,7 @@ static inline void lapic_shutdown(void) { } #define local_apic_timer_c2_ok 1 static inline void init_apic_mappings(void) { } static inline void disable_local_APIC(void) { } -static inline void apic_disable(void) { } + #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_64 @@ -470,9 +478,6 @@ static inline unsigned int read_apic_id(void) extern void default_setup_apic_routing(void); #ifdef CONFIG_X86_32 - -extern struct apic apic_default; - /* * Set up the logical destination ID. 
* diff --git a/trunk/arch/x86/include/asm/apicdef.h b/trunk/arch/x86/include/asm/apicdef.h index 7ddb36ab933b..bc9514fb3b13 100644 --- a/trunk/arch/x86/include/asm/apicdef.h +++ b/trunk/arch/x86/include/asm/apicdef.h @@ -22,7 +22,6 @@ # define APIC_INTEGRATED(x) (1) #endif #define APIC_XAPIC(x) ((x) >= 0x14) -#define APIC_EXT_SPACE(x) ((x) & 0x80000000) #define APIC_TASKPRI 0x80 #define APIC_TPRI_MASK 0xFFu #define APIC_ARBPRI 0x90 @@ -117,9 +116,7 @@ #define APIC_TDR_DIV_32 0x8 #define APIC_TDR_DIV_64 0x9 #define APIC_TDR_DIV_128 0xA -#define APIC_EFEAT 0x400 -#define APIC_ECTRL 0x410 -#define APIC_EILVTn(n) (0x500 + 0x10 * n) +#define APIC_EILVT0 0x500 #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ #define APIC_EILVT_NR_AMD_10H 4 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) @@ -128,6 +125,9 @@ #define APIC_EILVT_MSG_NMI 0x4 #define APIC_EILVT_MSG_EXT 0x7 #define APIC_EILVT_MASKED (1 << 16) +#define APIC_EILVT1 0x510 +#define APIC_EILVT2 0x520 +#define APIC_EILVT3 0x530 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #define APIC_BASE_MSR 0x800 diff --git a/trunk/arch/x86/include/asm/boot.h b/trunk/arch/x86/include/asm/boot.h index 418e632d4a80..6ba23dd9fc92 100644 --- a/trunk/arch/x86/include/asm/boot.h +++ b/trunk/arch/x86/include/asm/boot.h @@ -8,26 +8,11 @@ #ifdef __KERNEL__ -#include - /* Physical address where kernel should be loaded. */ #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) -/* Minimum kernel alignment, as a power of two */ -#ifdef CONFIG_x86_64 -#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT -#else -#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT+1) -#endif -#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2) - -#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \ - (CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)) -#error "Invalid value for CONFIG_PHYSICAL_ALIGN" -#endif - #ifdef CONFIG_KERNEL_BZIP2 #define BOOT_HEAP_SIZE 0x400000 #else /* !CONFIG_KERNEL_BZIP2 */ diff --git a/trunk/arch/x86/include/asm/bootparam.h b/trunk/arch/x86/include/asm/bootparam.h index 1724e8de317c..433adaebf9b6 100644 --- a/trunk/arch/x86/include/asm/bootparam.h +++ b/trunk/arch/x86/include/asm/bootparam.h @@ -50,8 +50,7 @@ struct setup_header { __u32 ramdisk_size; __u32 bootsect_kludge; __u16 heap_end_ptr; - __u8 ext_loader_ver; - __u8 ext_loader_type; + __u16 _pad1; __u32 cmd_line_ptr; __u32 initrd_addr_max; __u32 kernel_alignment; diff --git a/trunk/arch/x86/include/asm/cpu_debug.h b/trunk/arch/x86/include/asm/cpu_debug.h index d96c1ee3a95c..222802029fa6 100644 --- a/trunk/arch/x86/include/asm/cpu_debug.h +++ b/trunk/arch/x86/include/asm/cpu_debug.h @@ -86,7 +86,105 @@ enum cpu_file_bit { CPU_VALUE_BIT, /* value */ }; -#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) +#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) + +/* + * DisplayFamily_DisplayModel Processor Families/Processor Number Series + * -------------------------- ------------------------------------------ + * 05_01, 05_02, 05_04 Pentium, Pentium with MMX + * + * 06_01 Pentium Pro + * 06_03, 06_05 Pentium II Xeon, Pentium II + * 06_07, 06_08, 06_0A, 06_0B Pentium III Xeon, Pentum III + * + * 06_09, 060D Pentium M + * + * 06_0E Core Duo, Core Solo + * + * 06_0F Xeon 3000, 3200, 5100, 5300, 7300 series, + * Core 2 Quad, Core 2 Extreme, Core 2 Duo, + * Pentium dual-core + * 06_17 Xeon 5200, 5400 series, Core 2 Quad Q9650 + * + * 06_1C Atom + * + * 0F_00, 0F_01, 0F_02 Xeon, Xeon MP, Pentium 4 + * 0F_03, 0F_04 Xeon, Xeon MP, Pentium 4, Pentium 
D + * + * 0F_06 Xeon 7100, 5000 Series, Xeon MP, + * Pentium 4, Pentium D + */ + +/* Register processors bits */ +enum cpu_processor_bit { + CPU_NONE, +/* Intel */ + CPU_INTEL_PENTIUM_BIT, + CPU_INTEL_P6_BIT, + CPU_INTEL_PENTIUM_M_BIT, + CPU_INTEL_CORE_BIT, + CPU_INTEL_CORE2_BIT, + CPU_INTEL_ATOM_BIT, + CPU_INTEL_XEON_P4_BIT, + CPU_INTEL_XEON_MP_BIT, +/* AMD */ + CPU_AMD_K6_BIT, + CPU_AMD_K7_BIT, + CPU_AMD_K8_BIT, + CPU_AMD_0F_BIT, + CPU_AMD_10_BIT, + CPU_AMD_11_BIT, +}; + +#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT) +#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT) +#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT) +#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT) +#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT) +#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT) +#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT) +#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT) + +#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M) +#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2) +#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP) +#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM) +#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM) +#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM) +#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON) +#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON) +#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT) +#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON) +#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON) +#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT) +#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX) +#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE) +#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE) +#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT) +#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE) +#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT) +#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE) + +/* Select all supported Intel CPUs */ +#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE) + +#define CPU_AMD_K6 (1 << CPU_AMD_K6_BIT) +#define CPU_AMD_K7 (1 << CPU_AMD_K7_BIT) +#define CPU_AMD_K8 (1 << CPU_AMD_K8_BIT) +#define CPU_AMD_0F (1 << CPU_AMD_0F_BIT) +#define CPU_AMD_10 (1 << CPU_AMD_10_BIT) +#define CPU_AMD_11 (1 << CPU_AMD_11_BIT) + +#define CPU_K10_PLUS (CPU_AMD_10 | CPU_AMD_11) +#define CPU_K0F_PLUS (CPU_AMD_0F | CPU_K10_PLUS) +#define CPU_K8_PLUS (CPU_AMD_K8 | CPU_K0F_PLUS) +#define CPU_K7_PLUS (CPU_AMD_K7 | CPU_K8_PLUS) + +/* Select all supported AMD CPUs */ +#define CPU_AMD_ALL (CPU_AMD_K6 | CPU_K7_PLUS) + +/* Select all supported CPUs */ +#define CPU_ALL (CPU_INTEL_ALL | CPU_AMD_ALL) #define MAX_CPU_FILES 512 @@ -122,6 +220,7 @@ struct cpu_debug_range { unsigned min; /* Register range min */ unsigned max; /* Register range max */ unsigned flag; /* Supported flags */ + unsigned model; /* Supported models */ }; #endif /* _ASM_X86_CPU_DEBUG_H */ diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h index 13cc6a503a02..bb83b1c397aa 100644 --- a/trunk/arch/x86/include/asm/cpufeature.h +++ b/trunk/arch/x86/include/asm/cpufeature.h @@ -22,7 +22,7 @@ #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 
instruction */ #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ @@ -192,11 +192,11 @@ extern const char * const x86_power_flags[32]; #define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) #define setup_clear_cpu_cap(bit) do { \ clear_cpu_cap(&boot_cpu_data, bit); \ - set_bit(bit, (unsigned long *)cpu_caps_cleared); \ + set_bit(bit, (unsigned long *)cleared_cpu_caps); \ } while (0) #define setup_force_cpu_cap(bit) do { \ set_cpu_cap(&boot_cpu_data, bit); \ - set_bit(bit, (unsigned long *)cpu_caps_set); \ + clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ } while (0) #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) diff --git a/trunk/arch/x86/include/asm/hw_irq.h b/trunk/arch/x86/include/asm/hw_irq.h index 3bd1777a4c8b..b762ea49bd70 100644 --- a/trunk/arch/x86/include/asm/hw_irq.h +++ b/trunk/arch/x86/include/asm/hw_irq.h @@ -63,26 +63,7 @@ extern unsigned long io_apic_irqs; extern void init_VISWS_APIC_irqs(void); extern void setup_IO_APIC(void); extern void disable_IO_APIC(void); - -struct io_apic_irq_attr { - int ioapic; - int ioapic_pin; - int trigger; - int polarity; -}; - -static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, - int ioapic, int ioapic_pin, - int trigger, int polarity) -{ - irq_attr->ioapic = ioapic; - irq_attr->ioapic_pin = ioapic_pin; - irq_attr->trigger = trigger; - irq_attr->polarity = polarity; -} - -extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, - struct io_apic_irq_attr *irq_attr); +extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); extern void setup_ioapic_dest(void); extern void enable_IO_APIC(void); @@ -97,11 +78,7 @@ extern void eisa_set_level_irq(unsigned int irq); /* SMP */ extern void smp_apic_timer_interrupt(struct pt_regs *); extern void smp_spurious_interrupt(struct pt_regs *); -extern void smp_generic_interrupt(struct pt_regs *); extern void smp_error_interrupt(struct pt_regs *); -#ifdef CONFIG_X86_IO_APIC -extern asmlinkage void smp_irq_move_cleanup_interrupt(void); -#endif #ifdef CONFIG_SMP extern void smp_reschedule_interrupt(struct pt_regs *); extern void smp_call_function_interrupt(struct pt_regs *); diff --git a/trunk/arch/x86/include/asm/i387.h b/trunk/arch/x86/include/asm/i387.h index 63d185087d91..71c9e5183982 100644 --- a/trunk/arch/x86/include/asm/i387.h +++ b/trunk/arch/x86/include/asm/i387.h @@ -67,7 +67,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) ".previous\n" _ASM_EXTABLE(1b, 3b) : [err] "=r" (err) -#if 0 /* See comment in fxsave() below. */ +#if 0 /* See comment in __save_init_fpu() below. */ : [fx] "r" (fx), "m" (*fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); @@ -75,6 +75,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) return err; } +static inline int restore_fpu_checking(struct task_struct *tsk) +{ + if (task_thread_info(tsk)->status & TS_XSAVE) + return xrstor_checking(&tsk->thread.xstate->xsave); + else + return fxrstor_checking(&tsk->thread.xstate->fxsave); +} + /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is pending. Clear the x87 state here by setting it to fixed values. The kernel data segment can be sometimes 0 and sometimes @@ -112,7 +120,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx) ".previous\n" _ASM_EXTABLE(1b, 3b) : [err] "=r" (err), "=m" (*fx) -#if 0 /* See comment in fxsave() below. */ +#if 0 /* See comment in __fxsave_clear() below. 
*/ : [fx] "r" (fx), "0" (0)); #else : [fx] "cdaSDb" (fx), "0" (0)); @@ -177,9 +185,12 @@ static inline void tolerant_fwait(void) asm volatile("fnclex ; fwait"); } -/* perform fxrstor iff the processor has extended states, otherwise frstor */ -static inline int fxrstor_checking(struct i387_fxsave_struct *fx) +static inline void restore_fpu(struct task_struct *tsk) { + if (task_thread_info(tsk)->status & TS_XSAVE) { + xrstor_checking(&tsk->thread.xstate->xsave); + return; + } /* * The "nop" is needed to make the instructions the same * length. @@ -188,9 +199,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx) "nop ; frstor %1", "fxrstor %1", X86_FEATURE_FXSR, - "m" (*fx)); - - return 0; + "m" (tsk->thread.xstate->fxsave)); } /* We need a safe address that is cheap to find and that is already @@ -253,14 +262,6 @@ static inline void __save_init_fpu(struct task_struct *tsk) #endif /* CONFIG_X86_64 */ -static inline int restore_fpu_checking(struct task_struct *tsk) -{ - if (task_thread_info(tsk)->status & TS_XSAVE) - return xrstor_checking(&tsk->thread.xstate->xsave); - else - return fxrstor_checking(&tsk->thread.xstate->fxsave); -} - /* * Signal frame handlers... */ diff --git a/trunk/arch/x86/include/asm/i8259.h b/trunk/arch/x86/include/asm/i8259.h index 58d7091eeb1f..1a99e6c092af 100644 --- a/trunk/arch/x86/include/asm/i8259.h +++ b/trunk/arch/x86/include/asm/i8259.h @@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip; extern void mask_8259A(void); extern void unmask_8259A(void); +#ifdef CONFIG_X86_32 +extern void init_ISA_irqs(void); +#endif + #endif /* _ASM_X86_I8259_H */ diff --git a/trunk/arch/x86/include/asm/io_apic.h b/trunk/arch/x86/include/asm/io_apic.h index daf866ed0612..9d826e436010 100644 --- a/trunk/arch/x86/include/asm/io_apic.h +++ b/trunk/arch/x86/include/asm/io_apic.h @@ -154,19 +154,22 @@ extern int timer_through_8259; extern int io_apic_get_unique_id(int ioapic, int apic_id); extern int io_apic_get_version(int ioapic); extern int io_apic_get_redir_entries(int ioapic); +extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, + int edge_level, int active_high_low); #endif /* CONFIG_ACPI */ -struct io_apic_irq_attr; -extern int io_apic_set_pci_routing(struct device *dev, int irq, - struct io_apic_irq_attr *irq_attr); extern int (*ioapic_renumber_irq)(int ioapic, int irq); extern void ioapic_init_mappings(void); +#ifdef CONFIG_X86_64 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); +extern void reinit_intr_remapped_IO_APIC(int intr_remapping, + struct IO_APIC_route_entry **ioapic_entries); +#endif extern void probe_nr_irqs_gsi(void); diff --git a/trunk/arch/x86/include/asm/iomap.h b/trunk/arch/x86/include/asm/iomap.h index 0e9fe1d9d971..86af26091d6c 100644 --- a/trunk/arch/x86/include/asm/iomap.h +++ b/trunk/arch/x86/include/asm/iomap.h @@ -1,6 +1,3 @@ -#ifndef _ASM_X86_IOMAP_H -#define _ASM_X86_IOMAP_H - /* * Copyright © 2008 Ingo Molnar * @@ -34,5 +31,3 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); void iounmap_atomic(void *kvaddr, enum km_type type); - -#endif /* _ASM_X86_IOMAP_H */ diff --git a/trunk/arch/x86/include/asm/irq_remapping.h b/trunk/arch/x86/include/asm/irq_remapping.h index 
f275e2244505..0396760fccb8 100644 --- a/trunk/arch/x86/include/asm/irq_remapping.h +++ b/trunk/arch/x86/include/asm/irq_remapping.h @@ -1,6 +1,6 @@ #ifndef _ASM_X86_IRQ_REMAPPING_H #define _ASM_X86_IRQ_REMAPPING_H -#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) +#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) #endif /* _ASM_X86_IRQ_REMAPPING_H */ diff --git a/trunk/arch/x86/include/asm/irq_vectors.h b/trunk/arch/x86/include/asm/irq_vectors.h index 910b5a3d6751..3cbd79bbb47c 100644 --- a/trunk/arch/x86/include/asm/irq_vectors.h +++ b/trunk/arch/x86/include/asm/irq_vectors.h @@ -34,7 +34,6 @@ #ifdef CONFIG_X86_32 # define SYSCALL_VECTOR 0x80 -# define IA32_SYSCALL_VECTOR 0x80 #else # define IA32_SYSCALL_VECTOR 0x80 #endif diff --git a/trunk/arch/x86/include/asm/k8.h b/trunk/arch/x86/include/asm/k8.h index c2d1f3b58e5f..54c8cc53b24d 100644 --- a/trunk/arch/x86/include/asm/k8.h +++ b/trunk/arch/x86/include/asm/k8.h @@ -12,17 +12,4 @@ extern int cache_k8_northbridges(void); extern void k8_flush_garts(void); extern int k8_scan_nodes(unsigned long start, unsigned long end); -#ifdef CONFIG_K8_NB -static inline struct pci_dev *node_to_k8_nb_misc(int node) -{ - return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL; -} -#else -static inline struct pci_dev *node_to_k8_nb_misc(int node) -{ - return NULL; -} -#endif - - #endif /* _ASM_X86_K8_H */ diff --git a/trunk/arch/x86/include/asm/microcode.h b/trunk/arch/x86/include/asm/microcode.h index ef51b501e22a..c882664716c1 100644 --- a/trunk/arch/x86/include/asm/microcode.h +++ b/trunk/arch/x86/include/asm/microcode.h @@ -9,31 +9,20 @@ struct cpu_signature { struct device; -enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; - struct microcode_ops { - enum ucode_state (*request_microcode_user) (int cpu, - const void __user *buf, size_t size); + int (*request_microcode_user) (int cpu, const void __user *buf, size_t size); + int (*request_microcode_fw) (int cpu, struct device *device); - enum ucode_state (*request_microcode_fw) (int cpu, - struct device *device); + void (*apply_microcode) (int cpu); + int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); void (*microcode_fini_cpu) (int cpu); - - /* - * The generic 'microcode_core' part guarantees that - * the callbacks below run on a target cpu when they - * are being called. - * See also the "Synchronization" section in microcode_core.c. 
- */ - int (*apply_microcode) (int cpu); - int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); }; struct ucode_cpu_info { - struct cpu_signature cpu_sig; - int valid; - void *mc; + struct cpu_signature cpu_sig; + int valid; + void *mc; }; extern struct ucode_cpu_info ucode_cpu_info[]; diff --git a/trunk/arch/x86/include/asm/mpspec.h b/trunk/arch/x86/include/asm/mpspec.h index e2a1bb6d71ea..642fc7fc8cdc 100644 --- a/trunk/arch/x86/include/asm/mpspec.h +++ b/trunk/arch/x86/include/asm/mpspec.h @@ -61,11 +61,9 @@ extern void get_smp_config(void); #ifdef CONFIG_X86_MPPARSE extern void find_smp_config(void); extern void early_reserve_e820_mpc_new(void); -extern int enable_update_mptable; #else static inline void find_smp_config(void) { } static inline void early_reserve_e820_mpc_new(void) { } -#define enable_update_mptable 0 #endif void __cpuinit generic_processor_info(int apicid, int version); @@ -74,13 +72,20 @@ extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); extern void mp_config_acpi_legacy_irqs(void); -struct device; -extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level, - int active_high_low); +extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); extern int acpi_probe_gsi(void); #ifdef CONFIG_X86_IO_APIC +extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, + u32 gsi, int triggering, int polarity); extern int mp_find_ioapic(int gsi); extern int mp_find_ioapic_pin(int ioapic, int gsi); +#else +static inline int +mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, + u32 gsi, int triggering, int polarity) +{ + return 0; +} #endif #else /* !CONFIG_ACPI: */ static inline int acpi_probe_gsi(void) diff --git a/trunk/arch/x86/include/asm/msr-index.h b/trunk/arch/x86/include/asm/msr-index.h index 4d58d04fca83..ec41fc16c167 100644 --- a/trunk/arch/x86/include/asm/msr-index.h +++ b/trunk/arch/x86/include/asm/msr-index.h @@ -121,6 +121,7 @@ #define MSR_K8_TOP_MEM1 0xc001001a #define MSR_K8_TOP_MEM2 0xc001001d #define MSR_K8_SYSCFG 0xc0010010 +#define MSR_K8_HWCR 0xc0010015 #define MSR_K8_INT_PENDING_MSG 0xc0010055 /* C1E active bits in int pending message */ #define K8_INTP_C1E_ACTIVE_MASK 0x18000000 diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h index c97264409934..c45a0a568dff 100644 --- a/trunk/arch/x86/include/asm/nmi.h +++ b/trunk/arch/x86/include/asm/nmi.h @@ -64,7 +64,7 @@ static inline int nmi_watchdog_active(void) * but since they are power of two we could use a * cheaper way --cvg */ - return nmi_watchdog & (NMI_LOCAL_APIC | NMI_IO_APIC); + return nmi_watchdog & 0x3; } #endif diff --git a/trunk/arch/x86/include/asm/numa_64.h b/trunk/arch/x86/include/asm/numa_64.h index c4ae822e415f..064ed6df4cbe 100644 --- a/trunk/arch/x86/include/asm/numa_64.h +++ b/trunk/arch/x86/include/asm/numa_64.h @@ -17,6 +17,9 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks, extern void numa_init_array(void); extern int numa_off; +extern void srat_reserve_add_area(int nodeid); +extern int hotadd_percent; + extern s16 apicid_to_node[MAX_LOCAL_APIC]; extern unsigned long numa_free_all_bootmem(void); @@ -24,13 +27,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); #ifdef CONFIG_NUMA -/* - * Too small node sizes may confuse the VM badly. Usually they - * result from BIOS bugs. 
So dont recognize nodes as standalone - * NUMA entities that have less than this amount of RAM listed: - */ -#define NODE_MIN_SIZE (4*1024*1024) - extern void __init init_cpu_to_node(void); extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_clear_node(int cpu); diff --git a/trunk/arch/x86/include/asm/page_32_types.h b/trunk/arch/x86/include/asm/page_32_types.h index 6f1b7331313f..0f915ae649a7 100644 --- a/trunk/arch/x86/include/asm/page_32_types.h +++ b/trunk/arch/x86/include/asm/page_32_types.h @@ -54,6 +54,10 @@ extern unsigned int __VMALLOC_RESERVE; extern int sysctl_legacy_va_layout; extern void find_low_pfn_range(void); +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); +extern void initmem_init(unsigned long, unsigned long); +extern void free_initmem(void); extern void setup_bootmem_allocator(void); #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/x86/include/asm/page_64_types.h b/trunk/arch/x86/include/asm/page_64_types.h index 8d382d3abf38..d38c91b70248 100644 --- a/trunk/arch/x86/include/asm/page_64_types.h +++ b/trunk/arch/x86/include/asm/page_64_types.h @@ -32,14 +32,22 @@ */ #define __PAGE_OFFSET _AC(0xffff880000000000, UL) -#define __PHYSICAL_START ((CONFIG_PHYSICAL_START + \ - (CONFIG_PHYSICAL_ALIGN - 1)) & \ - ~(CONFIG_PHYSICAL_ALIGN - 1)) +#define __PHYSICAL_START CONFIG_PHYSICAL_START +#define __KERNEL_ALIGN 0x200000 + +/* + * Make sure kernel is aligned to 2MB address. Catching it at compile + * time is better. Change your config file and compile the kernel + * for a 2MB aligned address (CONFIG_PHYSICAL_START) + */ +#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 +#error "CONFIG_PHYSICAL_START must be a multiple of 2MB" +#endif #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) #define __START_KERNEL_map _AC(0xffffffff80000000, UL) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ +/* See Documentation/x86_64/mm.txt for a description of the memory map. 
*/ #define __PHYSICAL_MASK_SHIFT 46 #define __VIRTUAL_MASK_SHIFT 48 @@ -63,6 +71,12 @@ extern unsigned long __phys_addr(unsigned long); #define vmemmap ((struct page *)VMEMMAP_START) +extern unsigned long init_memory_mapping(unsigned long start, + unsigned long end); + +extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); +extern void free_initmem(void); + extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); diff --git a/trunk/arch/x86/include/asm/page_types.h b/trunk/arch/x86/include/asm/page_types.h index 6473f5ccff85..826ad37006ab 100644 --- a/trunk/arch/x86/include/asm/page_types.h +++ b/trunk/arch/x86/include/asm/page_types.h @@ -46,12 +46,6 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; -extern unsigned long init_memory_mapping(unsigned long start, - unsigned long end); - -extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); -extern void free_initmem(void); - #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PAGE_DEFS_H */ diff --git a/trunk/arch/x86/include/asm/pgtable.h b/trunk/arch/x86/include/asm/pgtable.h index 3f8d09d94eb3..29d96d168bc0 100644 --- a/trunk/arch/x86/include/asm/pgtable.h +++ b/trunk/arch/x86/include/asm/pgtable.h @@ -503,8 +503,6 @@ static inline int pgd_none(pgd_t pgd) #ifndef __ASSEMBLY__ -extern int direct_gbpages; - /* local pte updates need not use xchg for locking */ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) { diff --git a/trunk/arch/x86/include/asm/pgtable_64.h b/trunk/arch/x86/include/asm/pgtable_64.h index abde308fdb0f..6b87bc6d5018 100644 --- a/trunk/arch/x86/include/asm/pgtable_64.h +++ b/trunk/arch/x86/include/asm/pgtable_64.h @@ -25,6 +25,10 @@ extern pgd_t init_level4_pgt[]; extern void paging_init(void); +#endif /* !__ASSEMBLY__ */ + +#ifndef __ASSEMBLY__ + #define pte_ERROR(e) \ printk("%s:%d: bad pte %p(%016lx).\n", \ __FILE__, __LINE__, &(e), pte_val(e)) @@ -131,6 +135,8 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define update_mmu_cache(vma, address, pte) do { } while (0) +extern int direct_gbpages; + /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) diff --git a/trunk/arch/x86/include/asm/pgtable_64_types.h b/trunk/arch/x86/include/asm/pgtable_64_types.h index 766ea16fbbbd..fbf42b8e0383 100644 --- a/trunk/arch/x86/include/asm/pgtable_64_types.h +++ b/trunk/arch/x86/include/asm/pgtable_64_types.h @@ -51,11 +51,11 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. 
*/ + #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) -#define VMALLOC_START _AC(0xffffc90000000000, UL) -#define VMALLOC_END _AC(0xffffe8ffffffffff, UL) -#define VMEMMAP_START _AC(0xffffea0000000000, UL) +#define VMALLOC_START _AC(0xffffc20000000000, UL) +#define VMALLOC_END _AC(0xffffe1ffffffffff, UL) +#define VMEMMAP_START _AC(0xffffe20000000000, UL) #define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) diff --git a/trunk/arch/x86/include/asm/pgtable_types.h b/trunk/arch/x86/include/asm/pgtable_types.h index 4d258ad76a0f..b8238dc8786d 100644 --- a/trunk/arch/x86/include/asm/pgtable_types.h +++ b/trunk/arch/x86/include/asm/pgtable_types.h @@ -273,6 +273,7 @@ typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern int nx_enabled; +extern void set_nx(void); #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); diff --git a/trunk/arch/x86/include/asm/processor.h b/trunk/arch/x86/include/asm/processor.h index 87ede2f31bc7..c2cceae709c8 100644 --- a/trunk/arch/x86/include/asm/processor.h +++ b/trunk/arch/x86/include/asm/processor.h @@ -135,8 +135,7 @@ extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; extern struct tss_struct doublefault_tss; -extern __u32 cpu_caps_cleared[NCAPINTS]; -extern __u32 cpu_caps_set[NCAPINTS]; +extern __u32 cleared_cpu_caps[NCAPINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); @@ -410,6 +409,9 @@ DECLARE_PER_CPU(unsigned long, stack_canary); extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; +extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); +extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); +extern unsigned short num_cache_leaves; struct thread_struct { /* Cached TLS descriptors: */ @@ -425,12 +427,8 @@ struct thread_struct { unsigned short fsindex; unsigned short gsindex; #endif -#ifdef CONFIG_X86_32 unsigned long ip; -#endif -#ifdef CONFIG_X86_64 unsigned long fs; -#endif unsigned long gs; /* Hardware debugging registers: */ unsigned long debugreg0; @@ -816,7 +814,6 @@ extern unsigned int BIOS_revision; /* Boot loader type from the setup header: */ extern int bootloader_type; -extern int bootloader_version; extern char ignore_fpu_irq; @@ -877,6 +874,7 @@ static inline void spin_lock_prefetch(const void *x) .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ + .fs = __KERNEL_PERCPU, \ } /* diff --git a/trunk/arch/x86/include/asm/setup.h b/trunk/arch/x86/include/asm/setup.h index 4093d1ed6db2..bdc2ada05ae0 100644 --- a/trunk/arch/x86/include/asm/setup.h +++ b/trunk/arch/x86/include/asm/setup.h @@ -33,6 +33,7 @@ struct x86_quirks { int (*setup_ioapic_ids)(void); }; +extern void x86_quirk_pre_intr_init(void); extern void x86_quirk_intr_init(void); extern void x86_quirk_trap_init(void); diff --git a/trunk/arch/x86/include/asm/smp.h b/trunk/arch/x86/include/asm/smp.h index 6a84ed166aec..19e0d88b966d 100644 --- a/trunk/arch/x86/include/asm/smp.h +++ b/trunk/arch/x86/include/asm/smp.h @@ -180,7 +180,7 @@ extern int safe_smp_processor_id(void); static inline int logical_smp_processor_id(void) { /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); + return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); } #endif diff --git 
a/trunk/arch/x86/include/asm/sparsemem.h b/trunk/arch/x86/include/asm/sparsemem.h index 4517d6b93188..e3cc3c063ec5 100644 --- a/trunk/arch/x86/include/asm/sparsemem.h +++ b/trunk/arch/x86/include/asm/sparsemem.h @@ -27,7 +27,7 @@ #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ # define MAX_PHYSADDR_BITS 44 -# define MAX_PHYSMEM_BITS 46 +# define MAX_PHYSMEM_BITS 44 /* Can be max 45 bits */ #endif #endif /* CONFIG_SPARSEMEM */ diff --git a/trunk/arch/x86/include/asm/syscalls.h b/trunk/arch/x86/include/asm/syscalls.h index 372b76edd63f..7043408f6904 100644 --- a/trunk/arch/x86/include/asm/syscalls.h +++ b/trunk/arch/x86/include/asm/syscalls.h @@ -1,7 +1,7 @@ /* * syscalls.h - Linux syscall interfaces (arch-specific) * - * Copyright (c) 2008 Jaswinder Singh Rajput + * Copyright (c) 2008 Jaswinder Singh * * This file is released under the GPLv2. * See the file COPYING for more details. @@ -12,55 +12,50 @@ #include #include -#include #include +#include /* Common in X86_32 and X86_64 */ /* kernel/ioport.c */ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); -/* kernel/process.c */ -int sys_fork(struct pt_regs *); -int sys_vfork(struct pt_regs *); - /* kernel/ldt.c */ asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); -/* kernel/signal.c */ -long sys_rt_sigreturn(struct pt_regs *); - /* kernel/tls.c */ asmlinkage int sys_set_thread_area(struct user_desc __user *); asmlinkage int sys_get_thread_area(struct user_desc __user *); /* X86_32 only */ #ifdef CONFIG_X86_32 -/* kernel/ioport.c */ -long sys_iopl(struct pt_regs *); - /* kernel/process_32.c */ +int sys_fork(struct pt_regs *); int sys_clone(struct pt_regs *); +int sys_vfork(struct pt_regs *); int sys_execve(struct pt_regs *); -/* kernel/signal.c */ +/* kernel/signal_32.c */ asmlinkage int sys_sigsuspend(int, int, old_sigset_t); asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); int sys_sigaltstack(struct pt_regs *); unsigned long sys_sigreturn(struct pt_regs *); +long sys_rt_sigreturn(struct pt_regs *); -/* kernel/sys_i386_32.c */ -struct mmap_arg_struct; -struct sel_arg_struct; -struct oldold_utsname; -struct old_utsname; +/* kernel/ioport.c */ +long sys_iopl(struct pt_regs *); +/* kernel/sys_i386_32.c */ asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +struct mmap_arg_struct; asmlinkage int old_mmap(struct mmap_arg_struct __user *); +struct sel_arg_struct; asmlinkage int old_select(struct sel_arg_struct __user *); asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); +struct old_utsname; asmlinkage int sys_uname(struct old_utsname __user *); +struct oldold_utsname; asmlinkage int sys_olduname(struct oldold_utsname __user *); /* kernel/vm86_32.c */ @@ -70,27 +65,29 @@ int sys_vm86(struct pt_regs *); #else /* CONFIG_X86_32 */ /* X86_64 only */ -/* kernel/ioport.c */ -asmlinkage long sys_iopl(unsigned int, struct pt_regs *); - /* kernel/process_64.c */ +asmlinkage long sys_fork(struct pt_regs *); asmlinkage long sys_clone(unsigned long, unsigned long, void __user *, void __user *, struct pt_regs *); +asmlinkage long sys_vfork(struct pt_regs *); asmlinkage long sys_execve(char __user *, char __user * __user *, char __user * __user *, struct pt_regs *); long sys_arch_prctl(int, unsigned long); -/* kernel/signal.c */ +/* kernel/ioport.c */ +asmlinkage long sys_iopl(unsigned int, struct pt_regs *); + +/* kernel/signal_64.c */ asmlinkage long 
sys_sigaltstack(const stack_t __user *, stack_t __user *, struct pt_regs *); +long sys_rt_sigreturn(struct pt_regs *); /* kernel/sys_x86_64.c */ -struct new_utsname; - asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +struct new_utsname; asmlinkage long sys_uname(struct new_utsname __user *); #endif /* CONFIG_X86_32 */ diff --git a/trunk/arch/x86/include/asm/topology.h b/trunk/arch/x86/include/asm/topology.h index 066ef590d7e0..f44b49abca49 100644 --- a/trunk/arch/x86/include/asm/topology.h +++ b/trunk/arch/x86/include/asm/topology.h @@ -203,8 +203,7 @@ struct pci_bus; void x86_pci_root_bus_res_quirks(struct pci_bus *b); #ifdef CONFIG_SMP -#define mc_capable() ((boot_cpu_data.x86_max_cores > 1) && \ - (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)) +#define mc_capable() (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids) #define smt_capable() (smp_num_siblings > 1) #endif diff --git a/trunk/arch/x86/include/asm/traps.h b/trunk/arch/x86/include/asm/traps.h index cbfdc26b1460..0d5342515b86 100644 --- a/trunk/arch/x86/include/asm/traps.h +++ b/trunk/arch/x86/include/asm/traps.h @@ -2,7 +2,6 @@ #define _ASM_X86_TRAPS_H #include -#include /* TRAP_TRACE, ... */ #ifdef CONFIG_X86_32 #define dotraplinkage @@ -75,6 +74,7 @@ static inline int get_si_code(unsigned long condition) } extern int panic_on_unrecovered_nmi; +extern int kstack_depth_to_print; void math_error(void __user *); void math_emulate(struct math_emu_info *); diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h index 9b0e61bf7a88..bddd44f2f0ab 100644 --- a/trunk/arch/x86/include/asm/uv/uv_bau.h +++ b/trunk/arch/x86/include/asm/uv/uv_bau.h @@ -37,7 +37,7 @@ #define UV_CPUS_PER_ACT_STATUS 32 #define UV_ACT_STATUS_MASK 0x3 #define UV_ACT_STATUS_SIZE 2 -#define UV_ACTIVATION_DESCRIPTOR_SIZE 32 +#define UV_ADP_SIZE 32 #define UV_DISTRIBUTION_SIZE 256 #define UV_SW_ACK_NPENDING 8 #define UV_NET_ENDPOINT_INTD 0x38 diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 235f5927bb97..88d1bfc847d3 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp) obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o -obj-y += setup.o i8259.o irqinit.o +obj-y += setup.o i8259.o irqinit_$(BITS).o obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o diff --git a/trunk/arch/x86/kernel/acpi/boot.c b/trunk/arch/x86/kernel/acpi/boot.c index 631086159c53..723989d7f802 100644 --- a/trunk/arch/x86/kernel/acpi/boot.c +++ b/trunk/arch/x86/kernel/acpi/boot.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -523,7 +522,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) * success: return IRQ number (>=0) * failure: return < 0 */ -int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +int acpi_register_gsi(u32 gsi, int triggering, int polarity) { unsigned int irq; unsigned int plat_gsi = gsi; @@ -533,14 +532,14 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) * Make sure all (legacy) PCI IRQs are set as level-triggered. 
*/ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { - if (trigger == ACPI_LEVEL_SENSITIVE) + if (triggering == ACPI_LEVEL_SENSITIVE) eisa_set_level_irq(gsi); } #endif #ifdef CONFIG_X86_IO_APIC if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { - plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); + plat_gsi = mp_register_gsi(gsi, triggering, polarity); } #endif acpi_gsi_to_irq(plat_gsi, &irq); @@ -904,8 +903,10 @@ extern int es7000_plat; #endif static struct { + int apic_id; int gsi_base; int gsi_end; + DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); } mp_ioapic_routing[MAX_IO_APICS]; int mp_find_ioapic(int gsi) @@ -985,12 +986,16 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); mp_ioapics[idx].apicid = uniq_ioapic_id(id); +#ifdef CONFIG_X86_32 mp_ioapics[idx].apicver = io_apic_get_version(idx); - +#else + mp_ioapics[idx].apicver = 0; +#endif /* * Build basic GSI lookup table to facilitate gsi->io_apic lookups * and to prevent reprogramming of IOAPIC pins (PCI GSIs). */ + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid; mp_ioapic_routing[idx].gsi_base = gsi_base; mp_ioapic_routing[idx].gsi_end = gsi_base + io_apic_get_redir_entries(idx); @@ -1153,52 +1158,26 @@ void __init mp_config_acpi_legacy_irqs(void) } } -static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, - int polarity) -{ -#ifdef CONFIG_X86_MPPARSE - struct mpc_intsrc mp_irq; - struct pci_dev *pdev; - unsigned char number; - unsigned int devfn; - int ioapic; - u8 pin; - - if (!acpi_ioapic) - return 0; - if (!dev) - return 0; - if (dev->bus != &pci_bus_type) - return 0; - - pdev = to_pci_dev(dev); - number = pdev->bus->number; - devfn = pdev->devfn; - pin = pdev->pin; - /* print the entry should happen on mptable identically */ - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | - (polarity == ACPI_ACTIVE_HIGH ? 1 : 3); - mp_irq.srcbus = number; - mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); - ioapic = mp_find_ioapic(gsi); - mp_irq.dstapic = mp_ioapics[ioapic].apicid; - mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); - - save_mp_irq(&mp_irq); -#endif - return 0; -} - -int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +int mp_register_gsi(u32 gsi, int triggering, int polarity) { int ioapic; int ioapic_pin; - struct io_apic_irq_attr irq_attr; +#ifdef CONFIG_X86_32 +#define MAX_GSI_NUM 4096 +#define IRQ_COMPRESSION_START 64 + + static int pci_irq = IRQ_COMPRESSION_START; + /* + * Mapping between Global System Interrupts, which + * represent all possible interrupts, and IRQs + * assigned to actual devices. + */ + static int gsi_to_irq[MAX_GSI_NUM]; +#else if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) return gsi; +#endif /* Don't set up the ACPI SCI because it's already set up */ if (acpi_gbl_FADT.sci_interrupt == gsi) @@ -1217,22 +1196,93 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) gsi = ioapic_renumber_irq(ioapic, gsi); #endif + /* + * Avoid pin reprogramming. PRTs typically include entries + * with redundant pin->gsi mappings (but unique PCI devices); + * we only program the IOAPIC on the first. 
+ */ if (ioapic_pin > MP_MAX_IOAPIC_PIN) { printk(KERN_ERR "Invalid reference to IOAPIC pin " - "%d-%d\n", mp_ioapics[ioapic].apicid, + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, ioapic_pin); return gsi; } + if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) { + pr_debug("Pin %d-%d already programmed\n", + mp_ioapic_routing[ioapic].apic_id, ioapic_pin); +#ifdef CONFIG_X86_32 + return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]); +#else + return gsi; +#endif + } + + set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed); +#ifdef CONFIG_X86_32 + /* + * For GSI >= 64, use IRQ compression + */ + if ((gsi >= IRQ_COMPRESSION_START) + && (triggering == ACPI_LEVEL_SENSITIVE)) { + /* + * For PCI devices assign IRQs in order, avoiding gaps + * due to unused I/O APIC pins. + */ + int irq = gsi; + if (gsi < MAX_GSI_NUM) { + /* + * Retain the VIA chipset work-around (gsi > 15), but + * avoid a problem where the 8254 timer (IRQ0) is setup + * via an override (so it's not on pin 0 of the ioapic), + * and at the same time, the pin 0 interrupt is a PCI + * type. The gsi > 15 test could cause these two pins + * to be shared as IRQ0, and they are not shareable. + * So test for this condition, and if necessary, avoid + * the pin collision. + */ + gsi = pci_irq++; + /* + * Don't assign IRQ used by ACPI SCI + */ + if (gsi == acpi_gbl_FADT.sci_interrupt) + gsi = pci_irq++; + gsi_to_irq[irq] = gsi; + } else { + printk(KERN_ERR "GSI %u is too high\n", gsi); + return gsi; + } + } +#endif + io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, + triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, + polarity == ACPI_ACTIVE_HIGH ? 0 : 1); + return gsi; +} - if (enable_update_mptable) - mp_config_acpi_gsi(dev, gsi, trigger, polarity); +int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin, + u32 gsi, int triggering, int polarity) +{ +#ifdef CONFIG_X86_MPPARSE + struct mpc_intsrc mp_irq; + int ioapic; - set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, - trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, - polarity == ACPI_ACTIVE_HIGH ? 0 : 1); - io_apic_set_pci_routing(dev, gsi, &irq_attr); + if (!acpi_ioapic) + return 0; - return gsi; + /* print the entry should happen on mptable identically */ + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) | + (polarity == ACPI_ACTIVE_HIGH ? 1 : 3); + mp_irq.srcbus = number; + mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3); + ioapic = mp_find_ioapic(gsi); + mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id; + mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi); + + save_mp_irq(&mp_irq); +#endif + return 0; } /* diff --git a/trunk/arch/x86/kernel/acpi/realmode/Makefile b/trunk/arch/x86/kernel/acpi/realmode/Makefile index 167bc16ce0e5..1c31cc0e9def 100644 --- a/trunk/arch/x86/kernel/acpi/realmode/Makefile +++ b/trunk/arch/x86/kernel/acpi/realmode/Makefile @@ -9,7 +9,7 @@ always := wakeup.bin targets := wakeup.elf wakeup.lds -wakeup-y += wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o +wakeup-y += wakeup.o wakemain.o video-mode.o copy.o # The link order of the video-*.o modules can matter. In particular, # video-vga.o *must* be listed first, followed by video-vesa.o. 
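The mp_config_acpi_gsi() hunk in arch/x86/kernel/acpi/boot.c above packs the ACPI trigger and polarity attributes into the MP-table irqflag field: bits 3:2 carry the trigger mode (0x4 = edge, 0xc = level) and bits 1:0 carry the polarity (0x1 = active high, 0x3 = active low). A minimal, self-contained sketch of that encoding follows; it is illustrative only, not part of the patch, and uses local stand-in constants rather than the real ACPI headers.

/*
 * Sketch of the MP-table irqflag encoding used by mp_config_acpi_gsi()
 * in the hunk above. The ACPI_* values here are stand-ins chosen for
 * this example; the real definitions live in the ACPI headers.
 */
#include <stdio.h>

#define ACPI_LEVEL_SENSITIVE  0
#define ACPI_EDGE_SENSITIVE   1
#define ACPI_ACTIVE_HIGH      0
#define ACPI_ACTIVE_LOW       1

/* MP spec interrupt flags: bits 1:0 = polarity, bits 3:2 = trigger mode */
static unsigned char mp_irqflag(int triggering, int polarity)
{
	unsigned char trig = (triggering == ACPI_EDGE_SENSITIVE) ? 0x4 : 0xc;
	unsigned char pol  = (polarity == ACPI_ACTIVE_HIGH) ? 0x1 : 0x3;

	return trig | pol;
}

int main(void)
{
	/* level-triggered, active-low (typical PCI INTx) -> 0x0f */
	printf("level/low -> 0x%02x\n",
	       mp_irqflag(ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW));
	/* edge-triggered, active-high (typical ISA) -> 0x05 */
	printf("edge/high -> 0x%02x\n",
	       mp_irqflag(ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH));
	return 0;
}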
diff --git a/trunk/arch/x86/kernel/acpi/realmode/bioscall.S b/trunk/arch/x86/kernel/acpi/realmode/bioscall.S deleted file mode 100644 index f51eb0bb56ce..000000000000 --- a/trunk/arch/x86/kernel/acpi/realmode/bioscall.S +++ /dev/null @@ -1 +0,0 @@ -#include "../../../boot/bioscall.S" diff --git a/trunk/arch/x86/kernel/acpi/realmode/regs.c b/trunk/arch/x86/kernel/acpi/realmode/regs.c deleted file mode 100644 index 6206033ba202..000000000000 --- a/trunk/arch/x86/kernel/acpi/realmode/regs.c +++ /dev/null @@ -1 +0,0 @@ -#include "../../../boot/regs.c" diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c index a4c9cf0bf70b..f2870920f246 100644 --- a/trunk/arch/x86/kernel/apic/apic.c +++ b/trunk/arch/x86/kernel/apic/apic.c @@ -98,29 +98,6 @@ early_param("lapic", parse_lapic); /* Local APIC was disabled by the BIOS and enabled by the kernel */ static int enabled_via_apicbase; -/* - * Handle interrupt mode configuration register (IMCR). - * This register controls whether the interrupt signals - * that reach the BSP come from the master PIC or from the - * local APIC. Before entering Symmetric I/O Mode, either - * the BIOS or the operating system must switch out of - * PIC Mode by changing the IMCR. - */ -static inline void imcr_pic_to_apic(void) -{ - /* select IMCR register */ - outb(0x70, 0x22); - /* NMI and 8259 INTR go through APIC */ - outb(0x01, 0x23); -} - -static inline void imcr_apic_to_pic(void) -{ - /* select IMCR register */ - outb(0x70, 0x22); - /* NMI and 8259 INTR go directly to BSP */ - outb(0x00, 0x23); -} #endif #ifdef CONFIG_X86_64 @@ -134,19 +111,13 @@ static __init int setup_apicpmtimer(char *s) __setup("apicpmtimer", setup_apicpmtimer); #endif -int x2apic_mode; #ifdef CONFIG_X86_X2APIC +int x2apic; /* x2apic enabled before OS handover */ static int x2apic_preenabled; static int disable_x2apic; static __init int setup_nox2apic(char *str) { - if (x2apic_enabled()) { - pr_warning("Bios already enabled x2apic, " - "can't enforce nox2apic"); - return 0; - } - disable_x2apic = 1; setup_clear_cpu_cap(X86_FEATURE_X2APIC); return 0; @@ -238,31 +209,6 @@ static int modern_apic(void) return lapic_get_version() >= 0x14; } -/* - * bare function to substitute write operation - * and it's _that_ fast :) - */ -static void native_apic_write_dummy(u32 reg, u32 v) -{ - WARN_ON_ONCE((cpu_has_apic || !disable_apic)); -} - -static u32 native_apic_read_dummy(u32 reg) -{ - WARN_ON_ONCE((cpu_has_apic && !disable_apic)); - return 0; -} - -/* - * right after this call apic->write/read doesn't do anything - * note that there is no restore operation it works one way - */ -void apic_disable(void) -{ - apic->read = native_apic_read_dummy; - apic->write = native_apic_write_dummy; -} - void native_apic_wait_icr_idle(void) { while (apic_read(APIC_ICR) & APIC_ICR_BUSY) @@ -402,7 +348,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) { - unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); + unsigned long reg = (lvt_off << 4) + APIC_EILVT0; unsigned int v = (mask << 16) | (msg_type << 8) | vector; apic_write(reg, v); @@ -869,7 +815,7 @@ void clear_local_APIC(void) u32 v; /* APIC hasn't been mapped yet */ - if (!x2apic_mode && !apic_phys) + if (!x2apic && !apic_phys) return; maxlvt = lapic_get_maxlvt(); @@ -1341,7 +1287,7 @@ void check_x2apic(void) { if (x2apic_enabled()) { pr_info("x2apic enabled by BIOS, switching to x2apic ops\n"); - x2apic_preenabled = x2apic_mode = 1; + 
x2apic_preenabled = x2apic = 1; } } @@ -1349,7 +1295,7 @@ void enable_x2apic(void) { int msr, msr2; - if (!x2apic_mode) + if (!x2apic) return; rdmsr(MSR_IA32_APICBASE, msr, msr2); @@ -1358,7 +1304,6 @@ void enable_x2apic(void) wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); } } -#endif /* CONFIG_X86_X2APIC */ void __init enable_IR_x2apic(void) { @@ -1367,21 +1312,32 @@ void __init enable_IR_x2apic(void) unsigned long flags; struct IO_APIC_route_entry **ioapic_entries = NULL; - ret = dmar_table_init(); - if (ret) { - pr_debug("dmar_table_init() failed with %d:\n", ret); - goto ir_failed; - } + if (!cpu_has_x2apic) + return; - if (!intr_remapping_supported()) { - pr_debug("intr-remapping not supported\n"); - goto ir_failed; + if (!x2apic_preenabled && disable_x2apic) { + pr_info("Skipped enabling x2apic and Interrupt-remapping " + "because of nox2apic\n"); + return; } + if (x2apic_preenabled && disable_x2apic) + panic("Bios already enabled x2apic, can't enforce nox2apic"); if (!x2apic_preenabled && skip_ioapic_setup) { - pr_info("Skipped enabling intr-remap because of skipping " - "io-apic setup\n"); + pr_info("Skipped enabling x2apic and Interrupt-remapping " + "because of skipping io-apic setup\n"); + return; + } + + ret = dmar_table_init(); + if (ret) { + pr_info("dmar_table_init() failed with %d:\n", ret); + + if (x2apic_preenabled) + panic("x2apic enabled by bios. But IR enabling failed"); + else + pr_info("Not enabling x2apic,Intr-remapping\n"); return; } @@ -1401,16 +1357,19 @@ void __init enable_IR_x2apic(void) mask_IO_APIC_setup(ioapic_entries); mask_8259A(); - ret = enable_intr_remapping(x2apic_supported()); + ret = enable_intr_remapping(EIM_32BIT_APIC_ID); + + if (ret && x2apic_preenabled) { + local_irq_restore(flags); + panic("x2apic enabled by bios. But IR enabling failed"); + } + if (ret) goto end_restore; - pr_info("Enabled Interrupt-remapping\n"); - - if (x2apic_supported() && !x2apic_mode) { - x2apic_mode = 1; + if (!x2apic) { + x2apic = 1; enable_x2apic(); - pr_info("Enabled x2apic\n"); } end_restore: @@ -1419,34 +1378,37 @@ void __init enable_IR_x2apic(void) * IR enabling failed */ restore_IO_APIC_setup(ioapic_entries); + else + reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries); unmask_8259A(); local_irq_restore(flags); end: + if (!ret) { + if (!x2apic_preenabled) + pr_info("Enabled x2apic and interrupt-remapping\n"); + else + pr_info("Enabled Interrupt-remapping\n"); + } else + pr_err("Failed to enable Interrupt-remapping and x2apic\n"); if (ioapic_entries) free_ioapic_entries(ioapic_entries); - - if (!ret) - return; - -ir_failed: - if (x2apic_preenabled) - panic("x2apic enabled by bios. 
But IR enabling failed"); - else if (cpu_has_x2apic) - pr_info("Not enabling x2apic,Intr-remapping\n"); #else if (!cpu_has_x2apic) return; if (x2apic_preenabled) panic("x2apic enabled prior OS handover," - " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP"); + " enable CONFIG_INTR_REMAP"); + + pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping " + " and x2apic\n"); #endif return; } - +#endif /* CONFIG_X86_X2APIC */ #ifdef CONFIG_X86_64 /* @@ -1463,6 +1425,7 @@ static int __init detect_init_APIC(void) } mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; + boot_cpu_physical_apicid = 0; return 0; } #else @@ -1576,49 +1539,32 @@ void __init early_init_lapic_mapping(void) */ void __init init_apic_mappings(void) { - unsigned int new_apicid; - - if (x2apic_mode) { + if (x2apic) { boot_cpu_physical_apicid = read_apic_id(); return; } - /* If no local APIC can be found return early */ + /* + * If no local APIC can be found then set up a fake all + * zeroes page to simulate the local APIC and another + * one for the IO-APIC. + */ if (!smp_found_config && detect_init_APIC()) { - /* lets NOP'ify apic operations */ - pr_info("APIC: disable apic facility\n"); - apic_disable(); - } else { + apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); + apic_phys = __pa(apic_phys); + } else apic_phys = mp_lapic_addr; - /* - * acpi lapic path already maps that address in - * acpi_register_lapic_address() - */ - if (!acpi_lapic) - set_fixmap_nocache(FIX_APIC_BASE, apic_phys); - - apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", - APIC_BASE, apic_phys); - } + set_fixmap_nocache(FIX_APIC_BASE, apic_phys); + apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", + APIC_BASE, apic_phys); /* * Fetch the APIC ID of the BSP in case we have a * default configuration (or the MP table is broken). 
*/ - new_apicid = read_apic_id(); - if (boot_cpu_physical_apicid != new_apicid) { - boot_cpu_physical_apicid = new_apicid; - /* - * yeah -- we lie about apic_version - * in case if apic was disabled via boot option - * but it's not a problem for SMP compiled kernel - * since smp_sanity_check is prepared for such a case - * and disable smp mode - */ - apic_version[new_apicid] = - GET_APIC_VERSION(apic_read(APIC_LVR)); - } + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = read_apic_id(); } /* @@ -1787,7 +1733,8 @@ void __init connect_bsp_APIC(void) */ apic_printk(APIC_VERBOSE, "leaving PIC mode, " "enabling APIC mode.\n"); - imcr_pic_to_apic(); + outb(0x70, 0x22); + outb(0x01, 0x23); } #endif if (apic->enable_apic_mode) @@ -1815,7 +1762,8 @@ void disconnect_bsp_APIC(int virt_wire_setup) */ apic_printk(APIC_VERBOSE, "disabling APIC mode, " "entering PIC mode.\n"); - imcr_apic_to_pic(); + outb(0x70, 0x22); + outb(0x00, 0x23); return; } #endif @@ -2021,10 +1969,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) local_irq_save(flags); disable_local_APIC(); - +#ifdef CONFIG_INTR_REMAP if (intr_remapping_enabled) disable_intr_remapping(); - +#endif local_irq_restore(flags); return 0; } @@ -2034,34 +1982,42 @@ static int lapic_resume(struct sys_device *dev) unsigned int l, h; unsigned long flags; int maxlvt; - int ret = 0; + +#ifdef CONFIG_INTR_REMAP + int ret; struct IO_APIC_route_entry **ioapic_entries = NULL; if (!apic_pm_state.active) return 0; local_irq_save(flags); - if (intr_remapping_enabled) { + if (x2apic) { ioapic_entries = alloc_ioapic_entries(); if (!ioapic_entries) { WARN(1, "Alloc ioapic_entries in lapic resume failed."); - ret = -ENOMEM; - goto restore; + return -ENOMEM; } ret = save_IO_APIC_setup(ioapic_entries); if (ret) { WARN(1, "Saving IO-APIC state failed: %d\n", ret); free_ioapic_entries(ioapic_entries); - goto restore; + return ret; } mask_IO_APIC_setup(ioapic_entries); mask_8259A(); + enable_x2apic(); } +#else + if (!apic_pm_state.active) + return 0; - if (x2apic_mode) + local_irq_save(flags); + if (x2apic) enable_x2apic(); +#endif + else { /* * Make sure the APICBASE points to the right address @@ -2099,16 +2055,21 @@ static int lapic_resume(struct sys_device *dev) apic_write(APIC_ESR, 0); apic_read(APIC_ESR); - if (intr_remapping_enabled) { - reenable_intr_remapping(x2apic_mode); +#ifdef CONFIG_INTR_REMAP + if (intr_remapping_enabled) + reenable_intr_remapping(EIM_32BIT_APIC_ID); + + if (x2apic) { unmask_8259A(); restore_IO_APIC_setup(ioapic_entries); free_ioapic_entries(ioapic_entries); } -restore: +#endif + local_irq_restore(flags); - return ret; + + return 0; } /* @@ -2156,14 +2117,31 @@ static void apic_pm_activate(void) { } #endif /* CONFIG_PM */ #ifdef CONFIG_X86_64 - -static int __cpuinit apic_cluster_num(void) +/* + * apic_is_clustered_box() -- Check if we can expect good TSC + * + * Thus far, the major user of this is IBM's Summit2 series: + * + * Clustered boxes may have unsynced TSC problems if they are + * multi-chassis. Use available data to take a good guess. + * If in doubt, go HPET. + */ +__cpuinit int apic_is_clustered_box(void) { int i, clusters, zeros; unsigned id; u16 *bios_cpu_apicid; DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); + /* + * there is not this kind of box with AMD CPU yet. + * Some AMD box with quadcore cpu and 8 sockets apicid + * will be [4, 0x23] or [8, 0x27] could be thought to + * vsmp box still need checking... 
+ */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box()) + return 0; + bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); bitmap_zero(clustermap, NUM_APIC_CLUSTERS); @@ -2199,67 +2177,18 @@ static int __cpuinit apic_cluster_num(void) ++zeros; } - return clusters; -} - -static int __cpuinitdata multi_checked; -static int __cpuinitdata multi; - -static int __cpuinit set_multi(const struct dmi_system_id *d) -{ - if (multi) - return 0; - pr_info("APIC: %s detected, Multi Chassis\n", d->ident); - multi = 1; - return 0; -} - -static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = { - { - .callback = set_multi, - .ident = "IBM System Summit2", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), - DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"), - }, - }, - {} -}; - -static void __cpuinit dmi_check_multi(void) -{ - if (multi_checked) - return; - - dmi_check_system(multi_dmi_table); - multi_checked = 1; -} - -/* - * apic_is_clustered_box() -- Check if we can expect good TSC - * - * Thus far, the major user of this is IBM's Summit2 series: - * Clustered boxes may have unsynced TSC problems if they are - * multi-chassis. - * Use DMI to check them - */ -__cpuinit int apic_is_clustered_box(void) -{ - dmi_check_multi(); - if (multi) - return 1; - - if (!is_vsmp_box()) - return 0; - - /* - * ScaleMP vSMPowered boxes have one cluster per board and TSCs are + /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are * not guaranteed to be synced between boards */ - if (apic_cluster_num() > 1) + if (is_vsmp_box() && clusters > 1) return 1; - return 0; + /* + * If clusters > 2, then should be multi-chassis. + * May have to revisit this when multi-core + hyperthreaded CPUs come + * out, but AFAIK this will work even for them. + */ + return (clusters > 2); } #endif diff --git a/trunk/arch/x86/kernel/apic/apic_flat_64.c b/trunk/arch/x86/kernel/apic/apic_flat_64.c index d0c99abc26c3..306e5e88fb6f 100644 --- a/trunk/arch/x86/kernel/apic/apic_flat_64.c +++ b/trunk/arch/x86/kernel/apic/apic_flat_64.c @@ -161,7 +161,7 @@ static int flat_apic_id_registered(void) static int flat_phys_pkg_id(int initial_apic_id, int index_msb) { - return initial_apic_id >> index_msb; + return hard_smp_processor_id() >> index_msb; } struct apic apic_flat = { @@ -235,7 +235,7 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) * regardless of how many processors are present (x86_64 ES7000 * is an example). 
*/ - if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID && (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "system APIC only can use physical flat"); return 1; diff --git a/trunk/arch/x86/kernel/apic/es7000_32.c b/trunk/arch/x86/kernel/apic/es7000_32.c index 69328ac8de9c..302947775575 100644 --- a/trunk/arch/x86/kernel/apic/es7000_32.c +++ b/trunk/arch/x86/kernel/apic/es7000_32.c @@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi) return gsi; } -static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) +static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) { unsigned long vect = 0, psaival = 0; diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 1946fac42ab3..30da617d18e4 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include @@ -130,9 +129,12 @@ struct irq_pin_list { struct irq_pin_list *next; }; -static struct irq_pin_list *get_one_free_irq_2_pin(int node) +static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) { struct irq_pin_list *pin; + int node; + + node = cpu_to_node(cpu); pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); @@ -146,6 +148,9 @@ struct irq_cfg { unsigned move_cleanup_count; u8 vector; u8 move_in_progress : 1; +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + u8 move_desc_pending : 1; +#endif }; /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ @@ -207,9 +212,12 @@ static struct irq_cfg *irq_cfg(unsigned int irq) return cfg; } -static struct irq_cfg *get_one_free_irq_cfg(int node) +static struct irq_cfg *get_one_free_irq_cfg(int cpu) { struct irq_cfg *cfg; + int node; + + node = cpu_to_node(cpu); cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); if (cfg) { @@ -230,13 +238,13 @@ static struct irq_cfg *get_one_free_irq_cfg(int node) return cfg; } -int arch_init_chip_data(struct irq_desc *desc, int node) +int arch_init_chip_data(struct irq_desc *desc, int cpu) { struct irq_cfg *cfg; cfg = desc->chip_data; if (!cfg) { - desc->chip_data = get_one_free_irq_cfg(node); + desc->chip_data = get_one_free_irq_cfg(cpu); if (!desc->chip_data) { printk(KERN_ERR "can not alloc irq_cfg\n"); BUG_ON(1); @@ -246,9 +254,10 @@ int arch_init_chip_data(struct irq_desc *desc, int node) return 0; } -/* for move_irq_desc */ +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + static void -init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) +init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu) { struct irq_pin_list *old_entry, *head, *tail, *entry; @@ -257,7 +266,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) if (!old_entry) return; - entry = get_one_free_irq_2_pin(node); + entry = get_one_free_irq_2_pin(cpu); if (!entry) return; @@ -267,7 +276,7 @@ init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) tail = entry; old_entry = old_entry->next; while (old_entry) { - entry = get_one_free_irq_2_pin(node); + entry = get_one_free_irq_2_pin(cpu); if (!entry) { entry = head; while (entry) { @@ -307,12 +316,12 @@ static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) } void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node) + struct irq_desc *desc, int cpu) { struct irq_cfg *cfg; struct irq_cfg *old_cfg; - cfg = get_one_free_irq_cfg(node); + cfg = get_one_free_irq_cfg(cpu); if 
(!cfg) return; @@ -323,7 +332,7 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); - init_copy_irq_2_pin(old_cfg, cfg, node); + init_copy_irq_2_pin(old_cfg, cfg, cpu); } static void free_irq_cfg(struct irq_cfg *old_cfg) @@ -347,7 +356,19 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) old_desc->chip_data = NULL; } } -/* end for move_irq_desc */ + +static void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg = desc->chip_data; + + if (!cfg->move_in_progress) { + /* it means that domain is not changed */ + if (!cpumask_intersects(desc->affinity, mask)) + cfg->move_desc_pending = 1; + } +} +#endif #else static struct irq_cfg *irq_cfg(unsigned int irq) @@ -357,6 +378,13 @@ static struct irq_cfg *irq_cfg(unsigned int irq) #endif +#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC +static inline void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) +{ +} +#endif + struct io_apic { unsigned int index; unsigned int unused[3]; @@ -490,18 +518,132 @@ static void ioapic_mask_entry(int apic, int pin) spin_unlock_irqrestore(&ioapic_lock, flags); } +#ifdef CONFIG_SMP +static void send_cleanup_vector(struct irq_cfg *cfg) +{ + cpumask_var_t cleanup_mask; + + if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { + unsigned int i; + cfg->move_cleanup_count = 0; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + cfg->move_cleanup_count++; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); + } else { + cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); + cfg->move_cleanup_count = cpumask_weight(cleanup_mask); + apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + free_cpumask_var(cleanup_mask); + } + cfg->move_in_progress = 0; +} + +static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) +{ + int apic, pin; + struct irq_pin_list *entry; + u8 vector = cfg->vector; + + entry = cfg->irq_2_pin; + for (;;) { + unsigned int reg; + + if (!entry) + break; + + apic = entry->apic; + pin = entry->pin; + /* + * With interrupt-remapping, destination information comes + * from interrupt-remapping table entry. + */ + if (!irq_remapped(irq)) + io_apic_write(apic, 0x11 + pin*2, dest); + reg = io_apic_read(apic, 0x10 + pin*2); + reg &= ~IO_APIC_REDIR_VECTOR_MASK; + reg |= vector; + io_apic_modify(apic, 0x10 + pin*2, reg); + if (!entry->next) + break; + entry = entry->next; + } +} + +static int +assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); + +/* + * Either sets desc->affinity to a valid value, and returns + * ->cpu_mask_to_apicid of that, or returns BAD_APICID and + * leaves desc->affinity untouched. 
+ */ +static unsigned int +set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + unsigned int irq; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return BAD_APICID; + + irq = desc->irq; + cfg = desc->chip_data; + if (assign_irq_vector(irq, cfg, mask)) + return BAD_APICID; + + /* check that before desc->addinity get updated */ + set_extra_move_desc(desc, mask); + + cpumask_copy(desc->affinity, mask); + + return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); +} + +static void +set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + unsigned long flags; + unsigned int dest; + unsigned int irq; + + irq = desc->irq; + cfg = desc->chip_data; + + spin_lock_irqsave(&ioapic_lock, flags); + dest = set_desc_affinity(desc, mask); + if (dest != BAD_APICID) { + /* Only the high 8 bits are valid. */ + dest = SET_APIC_LOGICAL_ID(dest); + __target_IO_APIC_irq(irq, dest, cfg); + } + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +static void +set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + set_ioapic_affinity_irq_desc(desc, mask); +} +#endif /* CONFIG_SMP */ + /* * The common case is 1:1 IRQ<->pin mappings. Sometimes there are * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. */ -static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) +static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin) { struct irq_pin_list *entry; entry = cfg->irq_2_pin; if (!entry) { - entry = get_one_free_irq_2_pin(node); + entry = get_one_free_irq_2_pin(cpu); if (!entry) { printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", apic, pin); @@ -521,7 +663,7 @@ static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin entry = entry->next; } - entry->next = get_one_free_irq_2_pin(node); + entry->next = get_one_free_irq_2_pin(cpu); entry = entry->next; entry->apic = apic; entry->pin = pin; @@ -530,7 +672,7 @@ static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin /* * Reroute an IRQ to a different pin. */ -static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, +static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu, int oldapic, int oldpin, int newapic, int newpin) { @@ -550,7 +692,7 @@ static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, /* why? call replace before add? */ if (!replaced) - add_pin_to_irq_node(cfg, node, newapic, newpin); + add_pin_to_irq_cpu(cfg, cpu, newapic, newpin); } static inline void io_apic_modify_irq(struct irq_cfg *cfg, @@ -708,6 +850,7 @@ static int __init ioapic_pirq_setup(char *str) __setup("pirq=", ioapic_pirq_setup); #endif /* CONFIG_X86_32 */ +#ifdef CONFIG_INTR_REMAP struct IO_APIC_route_entry **alloc_ioapic_entries(void) { int apic; @@ -805,6 +948,20 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) return 0; } +void reinit_intr_remapped_IO_APIC(int intr_remapping, + struct IO_APIC_route_entry **ioapic_entries) + +{ + /* + * for now plain restore of previous settings. + * TBD: In the case of OS enabling interrupt-remapping, + * IO-APIC RTE's need to be setup to point to interrupt-remapping + * table entries. for now, do a plain restore, and wait for + * the setup_IO_APIC_irqs() to do proper initialization. 
+ */ + restore_IO_APIC_setup(ioapic_entries); +} + void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) { int apic; @@ -814,6 +971,7 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) kfree(ioapic_entries); } +#endif /* * Find the IRQ entry number of a certain pin. @@ -874,6 +1032,54 @@ static int __init find_isa_irq_apic(int irq, int type) return -1; } +/* + * Find a specific PCI IRQ entry. + * Not an __init, possibly needed by modules + */ +static int pin_2_irq(int idx, int apic, int pin); + +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin) +{ + int apic, i, best_guess = -1; + + apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", + bus, slot, pin); + if (test_bit(bus, mp_bus_not_pci)) { + apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus); + return -1; + } + for (i = 0; i < mp_irq_entries; i++) { + int lbus = mp_irqs[i].srcbus; + + for (apic = 0; apic < nr_ioapics; apic++) + if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || + mp_irqs[i].dstapic == MP_APIC_ALL) + break; + + if (!test_bit(lbus, mp_bus_not_pci) && + !mp_irqs[i].irqtype && + (bus == lbus) && + (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { + int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); + + if (!(apic || IO_APIC_IRQ(irq))) + continue; + + if (pin == (mp_irqs[i].srcbusirq & 3)) + return irq; + /* + * Use the first all-but-pin matching entry as a + * best-guess fuzzy result for broken mptables. + */ + if (best_guess < 0) + best_guess = irq; + } + } + return best_guess; +} + +EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + #if defined(CONFIG_EISA) || defined(CONFIG_MCA) /* * EISA Edge/Level control register, ELCR @@ -1092,64 +1298,6 @@ static int pin_2_irq(int idx, int apic, int pin) return irq; } -/* - * Find a specific PCI IRQ entry. - * Not an __init, possibly needed by modules - */ -int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, - struct io_apic_irq_attr *irq_attr) -{ - int apic, i, best_guess = -1; - - apic_printk(APIC_DEBUG, - "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", - bus, slot, pin); - if (test_bit(bus, mp_bus_not_pci)) { - apic_printk(APIC_VERBOSE, - "PCI BIOS passed nonexistent PCI bus %d!\n", bus); - return -1; - } - for (i = 0; i < mp_irq_entries; i++) { - int lbus = mp_irqs[i].srcbus; - - for (apic = 0; apic < nr_ioapics; apic++) - if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || - mp_irqs[i].dstapic == MP_APIC_ALL) - break; - - if (!test_bit(lbus, mp_bus_not_pci) && - !mp_irqs[i].irqtype && - (bus == lbus) && - (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { - int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); - - if (!(apic || IO_APIC_IRQ(irq))) - continue; - - if (pin == (mp_irqs[i].srcbusirq & 3)) { - set_io_apic_irq_attr(irq_attr, apic, - mp_irqs[i].dstirq, - irq_trigger(i), - irq_polarity(i)); - return irq; - } - /* - * Use the first all-but-pin matching entry as a - * best-guess fuzzy result for broken mptables. 
- */ - if (best_guess < 0) { - set_io_apic_irq_attr(irq_attr, apic, - mp_irqs[i].dstirq, - irq_trigger(i), - irq_polarity(i)); - best_guess = irq; - } - } - } - return best_guess; -} -EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); - void lock_vector_lock(void) { /* Used to the online set of cpus does not change @@ -1480,70 +1628,58 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq ioapic_write_entry(apic_id, pin, entry); } -static struct { - DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); -} mp_ioapic_routing[MAX_IO_APICS]; - static void __init setup_IO_APIC_irqs(void) { - int apic_id = 0, pin, idx, irq; + int apic_id, pin, idx, irq; int notcon = 0; struct irq_desc *desc; struct irq_cfg *cfg; - int node = cpu_to_node(boot_cpu_id); + int cpu = boot_cpu_id; apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); -#ifdef CONFIG_ACPI - if (!acpi_disabled && acpi_ioapic) { - apic_id = mp_find_ioapic(0); - if (apic_id < 0) - apic_id = 0; - } -#endif - - for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { - idx = find_irq_entry(apic_id, pin, mp_INT); - if (idx == -1) { - if (!notcon) { - notcon = 1; + for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { + for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { + + idx = find_irq_entry(apic_id, pin, mp_INT); + if (idx == -1) { + if (!notcon) { + notcon = 1; + apic_printk(APIC_VERBOSE, + KERN_DEBUG " %d-%d", + mp_ioapics[apic_id].apicid, pin); + } else + apic_printk(APIC_VERBOSE, " %d-%d", + mp_ioapics[apic_id].apicid, pin); + continue; + } + if (notcon) { apic_printk(APIC_VERBOSE, - KERN_DEBUG " %d-%d", - mp_ioapics[apic_id].apicid, pin); - } else - apic_printk(APIC_VERBOSE, " %d-%d", - mp_ioapics[apic_id].apicid, pin); - continue; - } - if (notcon) { - apic_printk(APIC_VERBOSE, - " (apicid-pin) not connected\n"); - notcon = 0; - } + " (apicid-pin) not connected\n"); + notcon = 0; + } - irq = pin_2_irq(idx, apic_id, pin); + irq = pin_2_irq(idx, apic_id, pin); - /* - * Skip the timer IRQ if there's a quirk handler - * installed and if it returns 1: - */ - if (apic->multi_timer_check && - apic->multi_timer_check(apic_id, irq)) - continue; + /* + * Skip the timer IRQ if there's a quirk handler + * installed and if it returns 1: + */ + if (apic->multi_timer_check && + apic->multi_timer_check(apic_id, irq)) + continue; - desc = irq_to_desc_alloc_node(irq, node); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); - continue; + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc for %d\n", irq); + continue; + } + cfg = desc->chip_data; + add_pin_to_irq_cpu(cfg, cpu, apic_id, pin); + + setup_IO_APIC_irq(apic_id, pin, irq, desc, + irq_trigger(idx), irq_polarity(idx)); } - cfg = desc->chip_data; - add_pin_to_irq_node(cfg, node, apic_id, pin); - /* - * don't mark it in pin_programmed, so later acpi could - * set it correctly when irq < 16 - */ - setup_IO_APIC_irq(apic_id, pin, irq, desc, - irq_trigger(idx), irq_polarity(idx)); } if (notcon) @@ -1733,7 +1869,7 @@ __apicdebuginit(void) print_APIC_bitfield(int base) __apicdebuginit(void) print_local_APIC(void *dummy) { - unsigned int i, v, ver, maxlvt; + unsigned int v, ver, maxlvt; u64 icr; if (apic_verbosity == APIC_QUIET) @@ -1821,18 +1957,6 @@ __apicdebuginit(void) print_local_APIC(void *dummy) printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); v = apic_read(APIC_TDCR); printk(KERN_DEBUG "... 
APIC TDCR: %08x\n", v); - - if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { - v = apic_read(APIC_EFEAT); - maxlvt = (v >> 16) & 0xff; - printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); - v = apic_read(APIC_ECTRL); - printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); - for (i = 0; i < maxlvt; i++) { - v = apic_read(APIC_EILVTn(i)); - printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); - } - } printk("\n"); } @@ -1881,11 +2005,6 @@ __apicdebuginit(void) print_PIC(void) __apicdebuginit(int) print_all_ICs(void) { print_PIC(); - - /* don't print out if apic is not there */ - if (!cpu_has_apic || disable_apic) - return 0; - print_all_local_APICs(); print_IO_APIC(); @@ -2241,118 +2360,6 @@ static int ioapic_retrigger_irq(unsigned int irq) */ #ifdef CONFIG_SMP -static void send_cleanup_vector(struct irq_cfg *cfg) -{ - cpumask_var_t cleanup_mask; - - if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { - unsigned int i; - cfg->move_cleanup_count = 0; - for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) - cfg->move_cleanup_count++; - for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) - apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); - } else { - cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); - cfg->move_cleanup_count = cpumask_weight(cleanup_mask); - apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); - free_cpumask_var(cleanup_mask); - } - cfg->move_in_progress = 0; -} - -static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) -{ - int apic, pin; - struct irq_pin_list *entry; - u8 vector = cfg->vector; - - entry = cfg->irq_2_pin; - for (;;) { - unsigned int reg; - - if (!entry) - break; - - apic = entry->apic; - pin = entry->pin; - /* - * With interrupt-remapping, destination information comes - * from interrupt-remapping table entry. - */ - if (!irq_remapped(irq)) - io_apic_write(apic, 0x11 + pin*2, dest); - reg = io_apic_read(apic, 0x10 + pin*2); - reg &= ~IO_APIC_REDIR_VECTOR_MASK; - reg |= vector; - io_apic_modify(apic, 0x10 + pin*2, reg); - if (!entry->next) - break; - entry = entry->next; - } -} - -static int -assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); - -/* - * Either sets desc->affinity to a valid value, and returns - * ->cpu_mask_to_apicid of that, or returns BAD_APICID and - * leaves desc->affinity untouched. - */ -static unsigned int -set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg; - unsigned int irq; - - if (!cpumask_intersects(mask, cpu_online_mask)) - return BAD_APICID; - - irq = desc->irq; - cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return BAD_APICID; - - cpumask_copy(desc->affinity, mask); - - return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); -} - -static int -set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) -{ - struct irq_cfg *cfg; - unsigned long flags; - unsigned int dest; - unsigned int irq; - int ret = -1; - - irq = desc->irq; - cfg = desc->chip_data; - - spin_lock_irqsave(&ioapic_lock, flags); - dest = set_desc_affinity(desc, mask); - if (dest != BAD_APICID) { - /* Only the high 8 bits are valid. 
*/ - dest = SET_APIC_LOGICAL_ID(dest); - __target_IO_APIC_irq(irq, dest, cfg); - ret = 0; - } - spin_unlock_irqrestore(&ioapic_lock, flags); - - return ret; -} - -static int -set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - return set_ioapic_affinity_irq_desc(desc, mask); -} #ifdef CONFIG_INTR_REMAP @@ -2367,25 +2374,26 @@ set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) * Real vector that is used for interrupting cpu will be coming from * the interrupt-remapping table entry. */ -static int +static void migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { struct irq_cfg *cfg; struct irte irte; unsigned int dest; unsigned int irq; - int ret = -1; if (!cpumask_intersects(mask, cpu_online_mask)) - return ret; + return; irq = desc->irq; if (get_irte(irq, &irte)) - return ret; + return; cfg = desc->chip_data; if (assign_irq_vector(irq, cfg, mask)) - return ret; + return; + + set_extra_move_desc(desc, mask); dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); @@ -2401,30 +2409,27 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) send_cleanup_vector(cfg); cpumask_copy(desc->affinity, mask); - - return 0; } /* * Migrates the IRQ destination in the process context. */ -static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, +static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { - return migrate_ioapic_irq_desc(desc, mask); + migrate_ioapic_irq_desc(desc, mask); } -static int set_ir_ioapic_affinity_irq(unsigned int irq, +static void set_ir_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); - return set_ir_ioapic_affinity_irq_desc(desc, mask); + set_ir_ioapic_affinity_irq_desc(desc, mask); } #else -static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, +static inline void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { - return 0; } #endif @@ -2486,19 +2491,86 @@ static void irq_complete_move(struct irq_desc **descp) struct irq_cfg *cfg = desc->chip_data; unsigned vector, me; - if (likely(!cfg->move_in_progress)) + if (likely(!cfg->move_in_progress)) { +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + if (likely(!cfg->move_desc_pending)) + return; + + /* domain has not changed, but affinity did */ + me = smp_processor_id(); + if (cpumask_test_cpu(me, desc->affinity)) { + *descp = desc = move_irq_desc(desc, me); + /* get the new one */ + cfg = desc->chip_data; + cfg->move_desc_pending = 0; + } +#endif return; + } vector = ~get_irq_regs()->orig_ax; me = smp_processor_id(); - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + *descp = desc = move_irq_desc(desc, me); + /* get the new one */ + cfg = desc->chip_data; +#endif send_cleanup_vector(cfg); + } } #else static inline void irq_complete_move(struct irq_desc **descp) {} #endif +static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) +{ + int apic, pin; + struct irq_pin_list *entry; + + entry = cfg->irq_2_pin; + for (;;) { + + if (!entry) + break; + + apic = entry->apic; + pin = entry->pin; + io_apic_eoi(apic, pin); + entry = entry->next; + } +} + +static void +eoi_ioapic_irq(struct irq_desc *desc) +{ + struct irq_cfg *cfg; + unsigned long flags; + unsigned int irq; + + irq = desc->irq; + cfg = desc->chip_data; + 
+ spin_lock_irqsave(&ioapic_lock, flags); + __eoi_ioapic_irq(irq, cfg); + spin_unlock_irqrestore(&ioapic_lock, flags); +} + +#ifdef CONFIG_X86_X2APIC +static void ack_x2apic_level(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + ack_x2APIC_irq(); + eoi_ioapic_irq(desc); +} + +static void ack_x2apic_edge(unsigned int irq) +{ + ack_x2APIC_irq(); +} +#endif + static void ack_apic_edge(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); @@ -2562,6 +2634,9 @@ static void ack_apic_level(unsigned int irq) */ ack_APIC_irq(); + if (irq_remapped(irq)) + eoi_ioapic_irq(desc); + /* Now we can move and renable the irq */ if (unlikely(do_unmask_irq)) { /* Only migrate the irq if the ack has been received. @@ -2608,50 +2683,22 @@ static void ack_apic_level(unsigned int irq) } #ifdef CONFIG_INTR_REMAP -static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) -{ - int apic, pin; - struct irq_pin_list *entry; - - entry = cfg->irq_2_pin; - for (;;) { - - if (!entry) - break; - - apic = entry->apic; - pin = entry->pin; - io_apic_eoi(apic, pin); - entry = entry->next; - } -} - -static void -eoi_ioapic_irq(struct irq_desc *desc) -{ - struct irq_cfg *cfg; - unsigned long flags; - unsigned int irq; - - irq = desc->irq; - cfg = desc->chip_data; - - spin_lock_irqsave(&ioapic_lock, flags); - __eoi_ioapic_irq(irq, cfg); - spin_unlock_irqrestore(&ioapic_lock, flags); -} - static void ir_ack_apic_edge(unsigned int irq) { - ack_APIC_irq(); +#ifdef CONFIG_X86_X2APIC + if (x2apic_enabled()) + return ack_x2apic_edge(irq); +#endif + return ack_apic_edge(irq); } static void ir_ack_apic_level(unsigned int irq) { - struct irq_desc *desc = irq_to_desc(irq); - - ack_APIC_irq(); - eoi_ioapic_irq(desc); +#ifdef CONFIG_X86_X2APIC + if (x2apic_enabled()) + return ack_x2apic_level(irq); +#endif + return ack_apic_level(irq); } #endif /* CONFIG_INTR_REMAP */ @@ -2856,7 +2903,7 @@ static inline void __init check_timer(void) { struct irq_desc *desc = irq_to_desc(0); struct irq_cfg *cfg = desc->chip_data; - int node = cpu_to_node(boot_cpu_id); + int cpu = boot_cpu_id; int apic1, pin1, apic2, pin2; unsigned long flags; int no_pin1 = 0; @@ -2922,7 +2969,7 @@ static inline void __init check_timer(void) * Ok, does IRQ0 through the IOAPIC work? 
*/ if (no_pin1) { - add_pin_to_irq_node(cfg, node, apic1, pin1); + add_pin_to_irq_cpu(cfg, cpu, apic1, pin1); setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); } else { /* for edge trigger, setup_IO_APIC_irq already @@ -2959,7 +3006,7 @@ static inline void __init check_timer(void) /* * legacy devices should be connected to IO APIC #0 */ - replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); + replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); enable_8259A_irq(0); if (timer_irq_works()) { @@ -3171,13 +3218,14 @@ static int nr_irqs_gsi = NR_IRQS_LEGACY; /* * Dynamic irq allocate and deallocation */ -unsigned int create_irq_nr(unsigned int irq_want, int node) +unsigned int create_irq_nr(unsigned int irq_want) { /* Allocate an unused irq */ unsigned int irq; unsigned int new; unsigned long flags; struct irq_cfg *cfg_new = NULL; + int cpu = boot_cpu_id; struct irq_desc *desc_new = NULL; irq = 0; @@ -3186,7 +3234,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) spin_lock_irqsave(&vector_lock, flags); for (new = irq_want; new < nr_irqs; new++) { - desc_new = irq_to_desc_alloc_node(new, node); + desc_new = irq_to_desc_alloc_cpu(new, cpu); if (!desc_new) { printk(KERN_INFO "can not get irq_desc for %d\n", new); continue; @@ -3195,9 +3243,6 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) if (cfg_new->vector != 0) continue; - - desc_new = move_irq_desc(desc_new, node); - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) irq = new; break; @@ -3215,12 +3260,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) int create_irq(void) { - int node = cpu_to_node(boot_cpu_id); unsigned int irq_want; int irq; irq_want = nr_irqs_gsi; - irq = create_irq_nr(irq_want, node); + irq = create_irq_nr(irq_want); if (irq == 0) irq = -1; @@ -3322,7 +3366,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms } #ifdef CONFIG_SMP -static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) +static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3331,7 +3375,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) - return -1; + return; cfg = desc->chip_data; @@ -3343,15 +3387,13 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); write_msi_msg_desc(desc, &msg); - - return 0; } #ifdef CONFIG_INTR_REMAP /* * Migrate the MSI irq to another cpumask. This migration is * done in the process context using interrupt-remapping hardware. 
*/ -static int +static void ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); @@ -3360,11 +3402,11 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) struct irte irte; if (get_irte(irq, &irte)) - return -1; + return; dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) - return -1; + return; irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -3381,8 +3423,6 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) */ if (cfg->move_in_progress) send_cleanup_vector(cfg); - - return 0; } #endif @@ -3478,17 +3518,15 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) unsigned int irq_want; struct intel_iommu *iommu = NULL; int index = 0; - int node; /* x86 doesn't support multiple MSI yet */ if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; - node = dev_to_node(&dev->dev); irq_want = nr_irqs_gsi; sub_handle = 0; list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = create_irq_nr(irq_want, node); + irq = create_irq_nr(irq_want); if (irq == 0) return -1; irq_want = irq + 1; @@ -3538,7 +3576,7 @@ void arch_teardown_msi_irq(unsigned int irq) #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) #ifdef CONFIG_SMP -static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3547,7 +3585,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) - return -1; + return; cfg = desc->chip_data; @@ -3559,8 +3597,6 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); dmar_msi_write(irq, &msg); - - return 0; } #endif /* CONFIG_SMP */ @@ -3594,7 +3630,7 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3603,7 +3639,7 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) - return -1; + return; cfg = desc->chip_data; @@ -3615,8 +3651,6 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); hpet_msi_write(irq, &msg); - - return 0; } #endif /* CONFIG_SMP */ @@ -3673,7 +3707,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) write_ht_irq_msg(irq, &msg); } -static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) +static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3681,13 +3715,11 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) dest = set_desc_affinity(desc, mask); if (dest == BAD_APICID) - return -1; + return; cfg = desc->chip_data; target_ht_irq(irq, dest, cfg->vector); - - return 0; } #endif @@ -3762,8 +3794,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long flags; int err; - BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, 
eligible_cpu); @@ -3777,13 +3807,15 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; - entry->vector = cfg->vector; - entry->delivery_mode = apic->irq_delivery_mode; - entry->dest_mode = apic->irq_dest_mode; - entry->polarity = 0; - entry->trigger = 0; - entry->mask = 0; - entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); + BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); + + entry->vector = cfg->vector; + entry->delivery_mode = apic->irq_delivery_mode; + entry->dest_mode = apic->irq_dest_mode; + entry->polarity = 0; + entry->trigger = 0; + entry->mask = 0; + entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); @@ -3801,10 +3833,10 @@ void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) struct uv_IO_APIC_route_entry *entry; int mmr_pnode; - BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); - mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; + BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); + entry->mask = 1; mmr_pnode = uv_blade_to_pnode(mmr_blade); @@ -3868,71 +3900,6 @@ int __init arch_probe_nr_irqs(void) } #endif -static int __io_apic_set_pci_routing(struct device *dev, int irq, - struct io_apic_irq_attr *irq_attr) -{ - struct irq_desc *desc; - struct irq_cfg *cfg; - int node; - int ioapic, pin; - int trigger, polarity; - - ioapic = irq_attr->ioapic; - if (!IO_APIC_IRQ(irq)) { - apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", - ioapic); - return -EINVAL; - } - - if (dev) - node = dev_to_node(dev); - else - node = cpu_to_node(boot_cpu_id); - - desc = irq_to_desc_alloc_node(irq, node); - if (!desc) { - printk(KERN_INFO "can not get irq_desc %d\n", irq); - return 0; - } - - pin = irq_attr->ioapic_pin; - trigger = irq_attr->trigger; - polarity = irq_attr->polarity; - - /* - * IRQs < 16 are already in the irq_2_pin[] map - */ - if (irq >= NR_IRQS_LEGACY) { - cfg = desc->chip_data; - add_pin_to_irq_node(cfg, node, ioapic, pin); - } - - setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); - - return 0; -} - -int io_apic_set_pci_routing(struct device *dev, int irq, - struct io_apic_irq_attr *irq_attr) -{ - int ioapic, pin; - /* - * Avoid pin reprogramming. PRTs typically include entries - * with redundant pin->gsi mappings (but unique PCI devices); - * we only program the IOAPIC on the first. 
- */ - ioapic = irq_attr->ioapic; - pin = irq_attr->ioapic_pin; - if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { - pr_debug("Pin %d-%d already programmed\n", - mp_ioapics[ioapic].apicid, pin); - return 0; - } - set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); - - return __io_apic_set_pci_routing(dev, irq, irq_attr); -} - /* -------------------------------------------------------------------------- ACPI-based IOAPIC Configuration -------------------------------------------------------------------------- */ @@ -4013,7 +3980,6 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) return apic_id; } -#endif int __init io_apic_get_version(int ioapic) { @@ -4026,6 +3992,39 @@ int __init io_apic_get_version(int ioapic) return reg_01.bits.version; } +#endif + +int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) +{ + struct irq_desc *desc; + struct irq_cfg *cfg; + int cpu = boot_cpu_id; + + if (!IO_APIC_IRQ(irq)) { + apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", + ioapic); + return -EINVAL; + } + + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc %d\n", irq); + return 0; + } + + /* + * IRQs < 16 are already in the irq_2_pin[] map + */ + if (irq >= NR_IRQS_LEGACY) { + cfg = desc->chip_data; + add_pin_to_irq_cpu(cfg, cpu, ioapic, pin); + } + + setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity); + + return 0; +} + int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) { @@ -4056,44 +4055,51 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) #ifdef CONFIG_SMP void __init setup_ioapic_dest(void) { - int pin, ioapic = 0, irq, irq_entry; + int pin, ioapic, irq, irq_entry; struct irq_desc *desc; + struct irq_cfg *cfg; const struct cpumask *mask; if (skip_ioapic_setup == 1) return; -#ifdef CONFIG_ACPI - if (!acpi_disabled && acpi_ioapic) { - ioapic = mp_find_ioapic(0); - if (ioapic < 0) - ioapic = 0; - } -#endif + for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { + for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { + irq_entry = find_irq_entry(ioapic, pin, mp_INT); + if (irq_entry == -1) + continue; + irq = pin_2_irq(irq_entry, ioapic, pin); - for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { - irq_entry = find_irq_entry(ioapic, pin, mp_INT); - if (irq_entry == -1) - continue; - irq = pin_2_irq(irq_entry, ioapic, pin); + /* setup_IO_APIC_irqs could fail to get vector for some device + * when you have too many devices, because at that time only boot + * cpu is online. 
+ */ + desc = irq_to_desc(irq); + cfg = desc->chip_data; + if (!cfg->vector) { + setup_IO_APIC_irq(ioapic, pin, irq, desc, + irq_trigger(irq_entry), + irq_polarity(irq_entry)); + continue; - desc = irq_to_desc(irq); + } - /* - * Honour affinities which have been set in early boot - */ - if (desc->status & - (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->affinity; - else - mask = apic->target_cpus(); + /* + * Honour affinities which have been set in early boot + */ + if (desc->status & + (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) + mask = desc->affinity; + else + mask = apic->target_cpus(); - if (intr_remapping_enabled) - set_ir_ioapic_affinity_irq_desc(desc, mask); - else - set_ioapic_affinity_irq_desc(desc, mask); - } + if (intr_remapping_enabled) + set_ir_ioapic_affinity_irq_desc(desc, mask); + else + set_ioapic_affinity_irq_desc(desc, mask); + } + } } #endif diff --git a/trunk/arch/x86/kernel/apic/nmi.c b/trunk/arch/x86/kernel/apic/nmi.c index a691302dc3ff..ce4fbfa315a1 100644 --- a/trunk/arch/x86/kernel/apic/nmi.c +++ b/trunk/arch/x86/kernel/apic/nmi.c @@ -104,7 +104,7 @@ static __init void nmi_cpu_busy(void *data) } #endif -static void report_broken_nmi(int cpu, unsigned int *prev_nmi_count) +static void report_broken_nmi(int cpu, int *prev_nmi_count) { printk(KERN_CONT "\n"); diff --git a/trunk/arch/x86/kernel/apic/probe_32.c b/trunk/arch/x86/kernel/apic/probe_32.c index 440a8bccd91a..01eda2ac65e4 100644 --- a/trunk/arch/x86/kernel/apic/probe_32.c +++ b/trunk/arch/x86/kernel/apic/probe_32.c @@ -160,6 +160,7 @@ extern struct apic apic_summit; extern struct apic apic_bigsmp; extern struct apic apic_es7000; extern struct apic apic_es7000_cluster; +extern struct apic apic_default; struct apic *apic = &apic_default; EXPORT_SYMBOL_GPL(apic); diff --git a/trunk/arch/x86/kernel/apic/probe_64.c b/trunk/arch/x86/kernel/apic/probe_64.c index bc3e880f9b82..1783652bb0e5 100644 --- a/trunk/arch/x86/kernel/apic/probe_64.c +++ b/trunk/arch/x86/kernel/apic/probe_64.c @@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = { void __init default_setup_apic_routing(void) { #ifdef CONFIG_X86_X2APIC - if (x2apic_mode && (apic != &apic_x2apic_phys && + if (x2apic && (apic != &apic_x2apic_phys && #ifdef CONFIG_X86_UV apic != &apic_x2apic_uv_x && #endif diff --git a/trunk/arch/x86/kernel/apic/summit_32.c b/trunk/arch/x86/kernel/apic/summit_32.c index 344eee4ac0a4..9cfe1f415d81 100644 --- a/trunk/arch/x86/kernel/apic/summit_32.c +++ b/trunk/arch/x86/kernel/apic/summit_32.c @@ -173,6 +173,13 @@ static inline int is_WPEG(struct rio_detail *rio){ rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); } + +/* In clustered mode, the high nibble of APIC ID is a cluster number. + * The low nibble is a 4-bit bitmap. 
*/ +#define XAPIC_DEST_CPUS_SHIFT 4 +#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) +#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) + #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) static const struct cpumask *summit_target_cpus(void) diff --git a/trunk/arch/x86/kernel/apic/x2apic_cluster.c b/trunk/arch/x86/kernel/apic/x2apic_cluster.c index 8e4cbb255c38..4a903e2f0d17 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_cluster.c +++ b/trunk/arch/x86/kernel/apic/x2apic_cluster.c @@ -10,7 +10,7 @@ #include #include -static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); +DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid); static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { diff --git a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c index 780a733a5e7a..2bda69352976 100644 --- a/trunk/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/trunk/arch/x86/kernel/apic/x2apic_uv_x.c @@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) cpumask_set_cpu(cpu, retmask); } -static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) +static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) { #ifdef CONFIG_SMP unsigned long val; @@ -583,18 +583,15 @@ void __init uv_system_init(void) bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); uv_blade_info = kmalloc(bytes, GFP_KERNEL); - BUG_ON(!uv_blade_info); get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes(); uv_node_to_blade = kmalloc(bytes, GFP_KERNEL); - BUG_ON(!uv_node_to_blade); memset(uv_node_to_blade, 255, bytes); bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus(); uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL); - BUG_ON(!uv_cpu_to_blade); memset(uv_cpu_to_blade, 255, bytes); blade = 0; diff --git a/trunk/arch/x86/kernel/asm-offsets_32.c b/trunk/arch/x86/kernel/asm-offsets_32.c index 1a830cbd7015..5a6aa1c1162f 100644 --- a/trunk/arch/x86/kernel/asm-offsets_32.c +++ b/trunk/arch/x86/kernel/asm-offsets_32.c @@ -146,5 +146,4 @@ void foo(void) OFFSET(BP_loadflags, boot_params, hdr.loadflags); OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); OFFSET(BP_version, boot_params, hdr.version); - OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); } diff --git a/trunk/arch/x86/kernel/asm-offsets_64.c b/trunk/arch/x86/kernel/asm-offsets_64.c index 898ecc47e129..e72f062fb4b5 100644 --- a/trunk/arch/x86/kernel/asm-offsets_64.c +++ b/trunk/arch/x86/kernel/asm-offsets_64.c @@ -125,7 +125,6 @@ int main(void) OFFSET(BP_loadflags, boot_params, hdr.loadflags); OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); OFFSET(BP_version, boot_params, hdr.version); - OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment); BLANK(); DEFINE(PAGE_SIZE_asm, PAGE_SIZE); diff --git a/trunk/arch/x86/kernel/cpu/amd.c b/trunk/arch/x86/kernel/cpu/amd.c index 728b3750a3e8..7e4a459daa64 100644 --- a/trunk/arch/x86/kernel/cpu/amd.c +++ b/trunk/arch/x86/kernel/cpu/amd.c @@ -272,7 +272,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) int cpu = smp_processor_id(); int node; - unsigned apicid = cpu_has_apic ? 
hard_smp_processor_id() : c->apicid; + unsigned apicid = hard_smp_processor_id(); node = c->phys_proc_id; if (apicid_to_node[apicid] != NUMA_NO_NODE) diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index b0517aa2bd3b..77848d9fca68 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -299,8 +299,7 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) return NULL; /* Not found */ } -__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; -__u32 cpu_caps_set[NCAPINTS] __cpuinitdata; +__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; void load_percpu_segment(int cpu) { @@ -769,12 +768,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_identify) this_cpu->c_identify(c); - /* Clear/Set all flags overriden by options, after probe */ - for (i = 0; i < NCAPINTS; i++) { - c->x86_capability[i] &= ~cpu_caps_cleared[i]; - c->x86_capability[i] |= cpu_caps_set[i]; - } - #ifdef CONFIG_X86_64 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif @@ -820,16 +813,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) #endif init_hypervisor(c); - - /* - * Clear/Set all flags overriden by options, need do it - * before following smp all cpus cap AND. - */ - for (i = 0; i < NCAPINTS; i++) { - c->x86_capability[i] &= ~cpu_caps_cleared[i]; - c->x86_capability[i] |= cpu_caps_set[i]; - } - /* * On SMP, boot_cpu_data holds the common feature set between * all CPUs; so make sure that we indicate which features are @@ -842,6 +825,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } + /* Clear all flags overriden by options */ + for (i = 0; i < NCAPINTS; i++) + c->x86_capability[i] &= ~cleared_cpu_caps[i]; + #ifdef CONFIG_X86_MCE /* Init Machine Check Exception if available. 
*/ mcheck_init(c); diff --git a/trunk/arch/x86/kernel/cpu/cpu_debug.c b/trunk/arch/x86/kernel/cpu/cpu_debug.c index 6b2a52dd0403..46e29ab96c6a 100644 --- a/trunk/arch/x86/kernel/cpu/cpu_debug.c +++ b/trunk/arch/x86/kernel/cpu/cpu_debug.c @@ -32,7 +32,9 @@ static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); +static DEFINE_PER_CPU(unsigned, cpu_modelflag); static DEFINE_PER_CPU(int, cpu_priv_count); +static DEFINE_PER_CPU(unsigned, cpu_model); static DEFINE_MUTEX(cpu_debug_lock); @@ -78,102 +80,302 @@ static struct cpu_file_base cpu_file[] = { { "value", CPU_REG_ALL, 1 }, }; -/* CPU Registers Range */ -static struct cpu_debug_range cpu_reg_range[] = { - { 0x00000000, 0x00000001, CPU_MC, }, - { 0x00000006, 0x00000007, CPU_MONITOR, }, - { 0x00000010, 0x00000010, CPU_TIME, }, - { 0x00000011, 0x00000013, CPU_PMC, }, - { 0x00000017, 0x00000017, CPU_PLATFORM, }, - { 0x0000001B, 0x0000001B, CPU_APIC, }, - { 0x0000002A, 0x0000002B, CPU_POWERON, }, - { 0x0000002C, 0x0000002C, CPU_FREQ, }, - { 0x0000003A, 0x0000003A, CPU_CONTROL, }, - { 0x00000040, 0x00000047, CPU_LBRANCH, }, - { 0x00000060, 0x00000067, CPU_LBRANCH, }, - { 0x00000079, 0x00000079, CPU_BIOS, }, - { 0x00000088, 0x0000008A, CPU_CACHE, }, - { 0x0000008B, 0x0000008B, CPU_BIOS, }, - { 0x0000009B, 0x0000009B, CPU_MONITOR, }, - { 0x000000C1, 0x000000C4, CPU_PMC, }, - { 0x000000CD, 0x000000CD, CPU_FREQ, }, - { 0x000000E7, 0x000000E8, CPU_PERF, }, - { 0x000000FE, 0x000000FE, CPU_MTRR, }, - - { 0x00000116, 0x0000011E, CPU_CACHE, }, - { 0x00000174, 0x00000176, CPU_SYSENTER, }, - { 0x00000179, 0x0000017B, CPU_MC, }, - { 0x00000186, 0x00000189, CPU_PMC, }, - { 0x00000198, 0x00000199, CPU_PERF, }, - { 0x0000019A, 0x0000019A, CPU_TIME, }, - { 0x0000019B, 0x0000019D, CPU_THERM, }, - { 0x000001A0, 0x000001A0, CPU_MISC, }, - { 0x000001C9, 0x000001C9, CPU_LBRANCH, }, - { 0x000001D7, 0x000001D8, CPU_LBRANCH, }, - { 0x000001D9, 0x000001D9, CPU_DEBUG, }, - { 0x000001DA, 0x000001E0, CPU_LBRANCH, }, - - { 0x00000200, 0x0000020F, CPU_MTRR, }, - { 0x00000250, 0x00000250, CPU_MTRR, }, - { 0x00000258, 0x00000259, CPU_MTRR, }, - { 0x00000268, 0x0000026F, CPU_MTRR, }, - { 0x00000277, 0x00000277, CPU_PAT, }, - { 0x000002FF, 0x000002FF, CPU_MTRR, }, - - { 0x00000300, 0x00000311, CPU_PMC, }, - { 0x00000345, 0x00000345, CPU_PMC, }, - { 0x00000360, 0x00000371, CPU_PMC, }, - { 0x0000038D, 0x00000390, CPU_PMC, }, - { 0x000003A0, 0x000003BE, CPU_PMC, }, - { 0x000003C0, 0x000003CD, CPU_PMC, }, - { 0x000003E0, 0x000003E1, CPU_PMC, }, - { 0x000003F0, 0x000003F2, CPU_PMC, }, - - { 0x00000400, 0x00000417, CPU_MC, }, - { 0x00000480, 0x0000048B, CPU_VMX, }, - - { 0x00000600, 0x00000600, CPU_DEBUG, }, - { 0x00000680, 0x0000068F, CPU_LBRANCH, }, - { 0x000006C0, 0x000006CF, CPU_LBRANCH, }, - - { 0x000107CC, 0x000107D3, CPU_PMC, }, - - { 0xC0000080, 0xC0000080, CPU_FEATURES, }, - { 0xC0000081, 0xC0000084, CPU_CALL, }, - { 0xC0000100, 0xC0000102, CPU_BASE, }, - { 0xC0000103, 0xC0000103, CPU_TIME, }, - - { 0xC0010000, 0xC0010007, CPU_PMC, }, - { 0xC0010010, 0xC0010010, CPU_CONF, }, - { 0xC0010015, 0xC0010015, CPU_CONF, }, - { 0xC0010016, 0xC001001A, CPU_MTRR, }, - { 0xC001001D, 0xC001001D, CPU_MTRR, }, - { 0xC001001F, 0xC001001F, CPU_CONF, }, - { 0xC0010030, 0xC0010035, CPU_BIOS, }, - { 0xC0010044, 0xC0010048, CPU_MC, }, - { 0xC0010050, 0xC0010056, CPU_SMM, }, - { 0xC0010058, 0xC0010058, CPU_CONF, }, - { 0xC0010060, 0xC0010060, CPU_CACHE, }, - { 0xC0010061, 0xC0010068, CPU_SMM, }, - { 0xC0010069, 
0xC001006B, CPU_SMM, }, - { 0xC0010070, 0xC0010071, CPU_SMM, }, - { 0xC0010111, 0xC0010113, CPU_SMM, }, - { 0xC0010114, 0xC0010118, CPU_SVM, }, - { 0xC0010140, 0xC0010141, CPU_OSVM, }, - { 0xC0011022, 0xC0011023, CPU_CONF, }, +/* Intel Registers Range */ +static struct cpu_debug_range cpu_intel_range[] = { + { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL }, + { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE }, + { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL }, + { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM }, + { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE }, + { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE }, + + { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE }, + { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON }, + { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON }, + { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE }, + + { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE }, + { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT }, + { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT }, + { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM }, + + { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE }, + { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 }, + { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE }, + { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON }, + + { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT }, + { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT }, + { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT }, + { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE }, + + { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 }, + { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 }, + { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX }, + { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 }, + { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT }, + + { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE }, + { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE }, + { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE }, + { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT }, + { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE }, + { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE }, + { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE }, + { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE }, + + { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT }, + { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON }, + { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE }, + { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON }, + { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE }, + { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 }, + { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE }, + { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 }, + + { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE }, + { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE }, + { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE }, + { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE }, + { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE }, + { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE }, + + { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON }, + { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE }, + { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON }, + { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT }, + { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON }, + { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT }, + { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON }, + { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON }, + { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON }, + 
{ 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON }, + { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE }, + { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON }, + + { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE }, + { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON }, + { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE }, + { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON }, + { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE }, + { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON }, + { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE }, + { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON }, + { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE }, + { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE }, + { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE }, + + { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE }, + { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON }, + { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON }, + + { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP }, + + { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON }, + { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON }, + { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON }, + { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON }, }; +/* AMD Registers Range */ +static struct cpu_debug_range cpu_amd_range[] = { + { 0x00000000, 0x00000001, CPU_MC, CPU_K10_PLUS, }, + { 0x00000010, 0x00000010, CPU_TIME, CPU_K8_PLUS, }, + { 0x0000001B, 0x0000001B, CPU_APIC, CPU_K8_PLUS, }, + { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_K7_PLUS }, + { 0x0000008B, 0x0000008B, CPU_VER, CPU_K8_PLUS }, + { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_K8_PLUS, }, + + { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_K8_PLUS, }, + { 0x00000179, 0x0000017B, CPU_MC, CPU_K8_PLUS, }, + { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_K8_PLUS, }, + { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_K8_PLUS, }, + + { 0x00000200, 0x0000020F, CPU_MTRR, CPU_K8_PLUS, }, + { 0x00000250, 0x00000250, CPU_MTRR, CPU_K8_PLUS, }, + { 0x00000258, 0x00000259, CPU_MTRR, CPU_K8_PLUS, }, + { 0x00000268, 0x0000026F, CPU_MTRR, CPU_K8_PLUS, }, + { 0x00000277, 0x00000277, CPU_PAT, CPU_K8_PLUS, }, + { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_K8_PLUS, }, + + { 0x00000400, 0x00000413, CPU_MC, CPU_K8_PLUS, }, + + { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_AMD_ALL, }, + { 0xC0000081, 0xC0000084, CPU_CALL, CPU_K8_PLUS, }, + { 0xC0000100, 0xC0000102, CPU_BASE, CPU_K8_PLUS, }, + { 0xC0000103, 0xC0000103, CPU_TIME, CPU_K10_PLUS, }, + + { 0xC0010000, 0xC0010007, CPU_PMC, CPU_K8_PLUS, }, + { 0xC0010010, 0xC0010010, CPU_CONF, CPU_K7_PLUS, }, + { 0xC0010015, 0xC0010015, CPU_CONF, CPU_K7_PLUS, }, + { 0xC0010016, 0xC001001A, CPU_MTRR, CPU_K8_PLUS, }, + { 0xC001001D, 0xC001001D, CPU_MTRR, CPU_K8_PLUS, }, + { 0xC001001F, 0xC001001F, CPU_CONF, CPU_K8_PLUS, }, + { 0xC0010030, 0xC0010035, CPU_BIOS, CPU_K8_PLUS, }, + { 0xC0010044, 0xC0010048, CPU_MC, CPU_K8_PLUS, }, + { 0xC0010050, 0xC0010056, CPU_SMM, CPU_K0F_PLUS, }, + { 0xC0010058, 0xC0010058, CPU_CONF, CPU_K10_PLUS, }, + { 0xC0010060, 0xC0010060, CPU_CACHE, CPU_AMD_11, }, + { 0xC0010061, 0xC0010068, CPU_SMM, CPU_K10_PLUS, }, + { 0xC0010069, 0xC001006B, CPU_SMM, CPU_AMD_11, }, + { 0xC0010070, 0xC0010071, CPU_SMM, CPU_K10_PLUS, }, + { 0xC0010111, 0xC0010113, CPU_SMM, CPU_K8_PLUS, }, + { 0xC0010114, 0xC0010118, CPU_SVM, CPU_K10_PLUS, }, + { 0xC0010140, 0xC0010141, CPU_OSVM, CPU_K10_PLUS, }, + { 0xC0011022, 0xC0011023, CPU_CONF, CPU_K10_PLUS, }, +}; + + +/* Intel */ +static int get_intel_modelflag(unsigned model) +{ + int flag; + + switch (model) { + case 0x0501: + case 
0x0502: + case 0x0504: + flag = CPU_INTEL_PENTIUM; + break; + case 0x0601: + case 0x0603: + case 0x0605: + case 0x0607: + case 0x0608: + case 0x060A: + case 0x060B: + flag = CPU_INTEL_P6; + break; + case 0x0609: + case 0x060D: + flag = CPU_INTEL_PENTIUM_M; + break; + case 0x060E: + flag = CPU_INTEL_CORE; + break; + case 0x060F: + case 0x0617: + flag = CPU_INTEL_CORE2; + break; + case 0x061C: + flag = CPU_INTEL_ATOM; + break; + case 0x0F00: + case 0x0F01: + case 0x0F02: + case 0x0F03: + case 0x0F04: + flag = CPU_INTEL_XEON_P4; + break; + case 0x0F06: + flag = CPU_INTEL_XEON_MP; + break; + default: + flag = CPU_NONE; + break; + } + + return flag; +} + +/* AMD */ +static int get_amd_modelflag(unsigned model) +{ + int flag; + + switch (model >> 8) { + case 0x6: + flag = CPU_AMD_K6; + break; + case 0x7: + flag = CPU_AMD_K7; + break; + case 0x8: + flag = CPU_AMD_K8; + break; + case 0xf: + flag = CPU_AMD_0F; + break; + case 0x10: + flag = CPU_AMD_10; + break; + case 0x11: + flag = CPU_AMD_11; + break; + default: + flag = CPU_NONE; + break; + } + + return flag; +} + +static int get_cpu_modelflag(unsigned cpu) +{ + int flag; + + flag = per_cpu(cpu_model, cpu); + + switch (flag >> 16) { + case X86_VENDOR_INTEL: + flag = get_intel_modelflag(flag); + break; + case X86_VENDOR_AMD: + flag = get_amd_modelflag(flag & 0xffff); + break; + default: + flag = CPU_NONE; + break; + } + + return flag; +} + +static int get_cpu_range_count(unsigned cpu) +{ + int index; + + switch (per_cpu(cpu_model, cpu) >> 16) { + case X86_VENDOR_INTEL: + index = ARRAY_SIZE(cpu_intel_range); + break; + case X86_VENDOR_AMD: + index = ARRAY_SIZE(cpu_amd_range); + break; + default: + index = 0; + break; + } + + return index; +} + static int is_typeflag_valid(unsigned cpu, unsigned flag) { - int i; + unsigned vendor, modelflag; + int i, index; /* Standard Registers should be always valid */ if (flag >= CPU_TSS) return 1; - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { - if (cpu_reg_range[i].flag == flag) - return 1; + modelflag = per_cpu(cpu_modelflag, cpu); + vendor = per_cpu(cpu_model, cpu) >> 16; + index = get_cpu_range_count(cpu); + + for (i = 0; i < index; i++) { + switch (vendor) { + case X86_VENDOR_INTEL: + if ((cpu_intel_range[i].model & modelflag) && + (cpu_intel_range[i].flag & flag)) + return 1; + break; + case X86_VENDOR_AMD: + if ((cpu_amd_range[i].model & modelflag) && + (cpu_amd_range[i].flag & flag)) + return 1; + break; + } } /* Invalid */ @@ -183,11 +385,26 @@ static int is_typeflag_valid(unsigned cpu, unsigned flag) static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, int index, unsigned flag) { - if (cpu_reg_range[index].flag == flag) { - *min = cpu_reg_range[index].min; - *max = cpu_reg_range[index].max; - } else - *max = 0; + unsigned modelflag; + + modelflag = per_cpu(cpu_modelflag, cpu); + *max = 0; + switch (per_cpu(cpu_model, cpu) >> 16) { + case X86_VENDOR_INTEL: + if ((cpu_intel_range[index].model & modelflag) && + (cpu_intel_range[index].flag & flag)) { + *min = cpu_intel_range[index].min; + *max = cpu_intel_range[index].max; + } + break; + case X86_VENDOR_AMD: + if ((cpu_amd_range[index].model & modelflag) && + (cpu_amd_range[index].flag & flag)) { + *min = cpu_amd_range[index].min; + *max = cpu_amd_range[index].max; + } + break; + } return *max; } @@ -217,7 +434,7 @@ static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) unsigned msr, msr_min, msr_max; struct cpu_private *priv; u32 low, high; - int i; + int i, range; if (seq) { priv = seq->private; @@ -229,7 
+446,9 @@ static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) } } - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { + range = get_cpu_range_count(cpu); + + for (i = 0; i < range; i++) { if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) continue; @@ -369,20 +588,8 @@ static void print_apic(void *arg) seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); - if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { - unsigned int i, v, maxeilvt; - - v = apic_read(APIC_EFEAT); - maxeilvt = (v >> 16) & 0xff; - seq_printf(seq, " EFEAT\t\t: %08x\n", v); - seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); - - for (i = 0; i < maxeilvt; i++) { - v = apic_read(APIC_EILVTn(i)); - seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); - } - } #endif /* CONFIG_X86_LOCAL_APIC */ + seq_printf(seq, "\n MSR\t:\n"); } @@ -581,11 +788,13 @@ static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) { struct dentry *cpu_dentry = NULL; unsigned reg, reg_min, reg_max; - int i, err = 0; + int i, range, err = 0; char reg_dir[12]; u32 low, high; - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { + range = get_cpu_range_count(cpu); + + for (i = 0; i < range; i++) { if (!get_cpu_range(cpu, &reg_min, &reg_max, i, cpu_base[type].flag)) continue; @@ -641,6 +850,10 @@ static int cpu_init_cpu(void) cpui = &cpu_data(cpu); if (!cpu_has(cpui, X86_FEATURE_MSR)) continue; + per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) | + (cpui->x86 << 8) | + (cpui->x86_model)); + per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu); sprintf(cpu_dir, "cpu%d", cpu); cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig b/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig index f138c6c389b9..52c839875478 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/trunk/arch/x86/kernel/cpu/cpufreq/Kconfig @@ -220,14 +220,11 @@ config X86_LONGHAUL If in doubt, say N. config X86_E_POWERSAVER - tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" + tristate "VIA C7 Enhanced PowerSaver" select CPU_FREQ_TABLE - depends on X86_32 && EXPERIMENTAL + depends on X86_32 help - This adds the CPUFreq driver for VIA C7 processors. However, this driver - does not have any safeguards to prevent operating the CPU out of spec - and is thus considered dangerous. Please use the regular ACPI cpufreq - driver, enabled by CONFIG_X86_ACPI_CPUFREQ. + This adds the CPUFreq driver for VIA C7 processors. If in doubt, say N.
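The cpu_debug.c hunks above replace the single MSR range table with per-vendor tables keyed off a packed per-cpu word: cpu_init_cpu() stores ((vendor << 16) | (family << 8) | model) in cpu_model, and get_cpu_modelflag() decodes that word to pick entries in cpu_intel_range[] or cpu_amd_range[]. A minimal stand-alone sketch of the packing scheme follows; it is illustrative only, using a hypothetical pack_cpu_model() helper rather than the kernel's per-cpu machinery, with the field widths taken from the hunk.

	#include <stdio.h>

	/* vendor in bits 23..16, family in bits 15..8, model in bits 7..0 */
	static unsigned pack_cpu_model(unsigned vendor, unsigned family,
				       unsigned model)
	{
		return (vendor << 16) | (family << 8) | model;
	}

	int main(void)
	{
		/* X86_VENDOR_INTEL is 0, so Intel values occupy only the low 16 bits */
		unsigned packed = pack_cpu_model(0, 0x06, 0x0F);

		unsigned vendor = packed >> 16;    /* selects the Intel or AMD range table */
		unsigned fammod = packed & 0xffff; /* matched against cases such as 0x060F */

		printf("vendor=%u family/model=0x%04x\n", vendor, fammod);
		return 0;
	}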
diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index ae9b503220ca..54b6de2cd947 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -90,7 +90,11 @@ static int check_est_cpu(unsigned int cpuid) { struct cpuinfo_x86 *cpu = &cpu_data(cpuid); - return cpu_has(cpu, X86_FEATURE_EST); + if (cpu->x86_vendor != X86_VENDOR_INTEL || + !cpu_has(cpu, X86_FEATURE_EST)) + return 0; + + return 1; } static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) @@ -546,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void) return -ENOMEM; } for_each_possible_cpu(i) { - if (!zalloc_cpumask_var_node( + if (!alloc_cpumask_var_node( &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, GFP_KERNEL, cpu_to_node(i))) { diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index d47c775eb0ab..a8363e5be4ef 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -322,7 +322,7 @@ static int powernow_acpi_init(void) goto err0; } - if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, + if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, GFP_KERNEL)) { retval = -ENOMEM; goto err05; diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index cf52215d9eb1..f6b32d112357 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -835,7 +835,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; int ret_val = -ENODEV; - acpi_integer control, status; + acpi_integer space_id; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); @@ -848,13 +848,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) goto err_out; } - control = data->acpi_data.control_register.space_id; - status = data->acpi_data.status_register.space_id; - - if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || - (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + space_id = data->acpi_data.control_register.space_id; + if ((space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { dprintk("Invalid control/status registers (%x - %x)\n", - control, status); + data->acpi_data.control_register.space_id, + space_id); goto err_out; } @@ -887,7 +886,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); - if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { + if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { printk(KERN_ERR PFX "unable to alloc powernow_k8_data cpumask\n"); ret_val = -ENOMEM; diff --git a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 55c831ed71ce..c9f1fdc02830 100644 --- a/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/trunk/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy, if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) return -ENOMEM; - if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { + if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { 
free_cpumask_var(saved_mask); return -ENOMEM; } diff --git a/trunk/arch/x86/kernel/cpu/intel.c b/trunk/arch/x86/kernel/cpu/intel.c index daed39ba2614..7437fa133c02 100644 --- a/trunk/arch/x86/kernel/cpu/intel.c +++ b/trunk/arch/x86/kernel/cpu/intel.c @@ -229,12 +229,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) } #endif -static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) +static void __cpuinit srat_detect_node(void) { #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) unsigned node; int cpu = smp_processor_id(); - int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; + int apicid = hard_smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ @@ -400,7 +400,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) } /* Work around errata */ - srat_detect_node(c); + srat_detect_node(); if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); diff --git a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c index 789efe217e1a..483eda96e102 100644 --- a/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/trunk/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -17,7 +17,6 @@ #include #include -#include #define LVL_1_INST 1 #define LVL_1_DATA 2 @@ -160,6 +159,14 @@ struct _cpuid4_info_regs { unsigned long can_disable; }; +#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS) +static struct pci_device_id k8_nb_id[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, + {} +}; +#endif + unsigned short num_cache_leaves; /* AMD doesn't have CPUID4. Emulate it here to report the same @@ -200,17 +207,10 @@ union l3_cache { }; static const unsigned short __cpuinitconst assocs[] = { - [1] = 1, - [2] = 2, - [4] = 4, - [6] = 8, - [8] = 16, - [0xa] = 32, - [0xb] = 48, + [1] = 1, [2] = 2, [4] = 4, [6] = 8, + [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, - [0xd] = 96, - [0xe] = 128, - [0xf] = 0xffff /* fully associative - no way to show this currently */ + [0xf] = 0xffff // ?? 
}; static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; @@ -271,8 +271,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, eax->split.type = types[leaf]; eax->split.level = levels[leaf]; if (leaf == 3) - eax->split.num_threads_sharing = - current_cpu_data.x86_max_cores - 1; + eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1; else eax->split.num_threads_sharing = 0; eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; @@ -292,14 +291,6 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) { if (index < 3) return; - - if (boot_cpu_data.x86 == 0x11) - return; - - /* see erratum #382 */ - if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8)) - return; - this_leaf->can_disable = 1; } @@ -705,75 +696,97 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) #define to_object(k) container_of(k, struct _index_kobject, kobj) #define to_attr(a) container_of(a, struct _cache_attr, attr) -static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, - unsigned int index) +#ifdef CONFIG_PCI +static struct pci_dev *get_k8_northbridge(int node) +{ + struct pci_dev *dev = NULL; + int i; + + for (i = 0; i <= node; i++) { + do { + dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); + if (!dev) + break; + } while (!pci_match_id(&k8_nb_id[0], dev)); + if (!dev) + break; + } + return dev; +} +#else +static struct pci_dev *get_k8_northbridge(int node) +{ + return NULL; +} +#endif + +static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) { - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); - int node = cpu_to_node(cpu); - struct pci_dev *dev = node_to_k8_nb_misc(node); - unsigned int reg = 0; + const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); + int node = cpu_to_node(cpumask_first(mask)); + struct pci_dev *dev = NULL; + ssize_t ret = 0; + int i; if (!this_leaf->can_disable) - return -EINVAL; + return sprintf(buf, "Feature not enabled\n"); - if (!dev) + dev = get_k8_northbridge(node); + if (!dev) { + printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); return -EINVAL; + } - pci_read_config_dword(dev, 0x1BC + index * 4, &reg); - return sprintf(buf, "%x\n", reg); -} + for (i = 0; i < 2; i++) { + unsigned int reg; -#define SHOW_CACHE_DISABLE(index) \ -static ssize_t \ -show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \ -{ \ - return show_cache_disable(this_leaf, buf, index); \ + pci_read_config_dword(dev, 0x1BC + i * 4, &reg); + + ret += sprintf(buf, "%sEntry: %d\n", buf, i); + ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n", + buf, + reg & 0x80000000 ? "Disabled" : "Allowed", + reg & 0x40000000 ?
"Disabled" : "Allowed"); + ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n", + buf, (reg & 0x30000) >> 16, reg & 0xfff); + } + return ret; } -SHOW_CACHE_DISABLE(0) -SHOW_CACHE_DISABLE(1) -static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, - const char *buf, size_t count, unsigned int index) +static ssize_t +store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, + size_t count) { - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); - int node = cpu_to_node(cpu); - struct pci_dev *dev = node_to_k8_nb_misc(node); - unsigned long val = 0; - unsigned int scrubber = 0; + const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); + int node = cpu_to_node(cpumask_first(mask)); + struct pci_dev *dev = NULL; + unsigned int ret, index, val; if (!this_leaf->can_disable) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + return 0; - if (!dev) + if (strlen(buf) > 15) return -EINVAL; - if (strict_strtoul(buf, 10, &val) < 0) + ret = sscanf(buf, "%x %x", &index, &val); + if (ret != 2) + return -EINVAL; + if (index > 1) return -EINVAL; val |= 0xc0000000; - - pci_read_config_dword(dev, 0x58, &scrubber); - scrubber &= ~0x1f000000; - pci_write_config_dword(dev, 0x58, scrubber); + dev = get_k8_northbridge(node); + if (!dev) { + printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n"); + return -EINVAL; + } pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000); wbinvd(); pci_write_config_dword(dev, 0x1BC + index * 4, val); - return count; -} -#define STORE_CACHE_DISABLE(index) \ -static ssize_t \ -store_cache_disable_##index(struct _cpuid4_info *this_leaf, \ - const char *buf, size_t count) \ -{ \ - return store_cache_disable(this_leaf, buf, count, index); \ + return 1; } -STORE_CACHE_DISABLE(0) -STORE_CACHE_DISABLE(1) struct _cache_attr { struct attribute attr; @@ -795,10 +808,7 @@ define_one_ro(size); define_one_ro(shared_cpu_map); define_one_ro(shared_cpu_list); -static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, - show_cache_disable_0, store_cache_disable_0); -static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, - show_cache_disable_1, store_cache_disable_1); +static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable); static struct attribute * default_attrs[] = { &type.attr, @@ -810,8 +820,7 @@ static struct attribute * default_attrs[] = { &size.attr, &shared_cpu_map.attr, &shared_cpu_list.attr, - &cache_disable_0.attr, - &cache_disable_1.attr, + &cache_disable.attr, NULL }; diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_64.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_64.c index 09dd1d414fc3..6fb0b359d2a5 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -1163,7 +1163,7 @@ static __init int mce_init_device(void) if (!mce_available(&boot_cpu_data)) return -EIO; - zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); + alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); err = mce_init_banks(); if (err) diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 65a0fceedcd7..cef3ee30744b 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel_64.c @@ -15,6 +15,7 @@ #include #include #include +#include asmlinkage void smp_thermal_interrupt(void) { diff --git a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c 
b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c index 1d584a18a50d..ce0fe4b5c04f 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -808,7 +808,7 @@ int __init mtrr_cleanup(unsigned address_bits) if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) return 0; - rdmsr(MSR_MTRRdefType, def, dummy); + rdmsr(MTRRdefType_MSR, def, dummy); def &= 0xff; if (def != MTRR_TYPE_UNCACHABLE) return 0; @@ -1003,7 +1003,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) */ if (!is_cpu(INTEL) || disable_mtrr_trim) return 0; - rdmsr(MSR_MTRRdefType, def, dummy); + rdmsr(MTRRdefType_MSR, def, dummy); def &= 0xff; if (def != MTRR_TYPE_UNCACHABLE) return 0; diff --git a/trunk/arch/x86/kernel/cpu/mtrr/generic.c b/trunk/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69f0b27..d21d4fb161f7 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/generic.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/generic.c @@ -20,9 +20,9 @@ struct fixed_range_block { }; static struct fixed_range_block fixed_range_blocks[] = { - { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ - { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ - { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ + { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */ + { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */ + { MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */ {} }; @@ -194,12 +194,12 @@ get_fixed_ranges(mtrr_type * frs) k8_check_syscfg_dram_mod_en(); - rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]); + rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); for (i = 0; i < 2; i++) - rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); + rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]); for (i = 0; i < 8; i++) - rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); + rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); } void mtrr_save_fixed_ranges(void *info) @@ -310,7 +310,7 @@ void __init get_mtrr_state(void) vrs = mtrr_state.var_ranges; - rdmsr(MSR_MTRRcap, lo, dummy); + rdmsr(MTRRcap_MSR, lo, dummy); mtrr_state.have_fixed = (lo >> 8) & 1; for (i = 0; i < num_var_ranges; i++) @@ -318,7 +318,7 @@ void __init get_mtrr_state(void) if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); - rdmsr(MSR_MTRRdefType, lo, dummy); + rdmsr(MTRRdefType_MSR, lo, dummy); mtrr_state.def_type = (lo & 0xff); mtrr_state.enabled = (lo & 0xc00) >> 10; @@ -583,10 +583,10 @@ static void prepare_set(void) __acquires(set_atomicity_lock) __flush_tlb(); /* Save MTRR state */ - rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); + rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi); } static void post_set(void) __releases(set_atomicity_lock) @@ -595,7 +595,7 @@ static void post_set(void) __releases(set_atomicity_lock) __flush_tlb(); /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); /* Enable caches */ write_cr0(read_cr0() & 0xbfffffff); @@ -707,7 +707,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, unsigned i static int generic_have_wrcomb(void) { unsigned long config, dummy; - rdmsr(MSR_MTRRcap, config, dummy); + rdmsr(MTRRcap_MSR, config, dummy); return (config & (1 << 10)); } diff --git a/trunk/arch/x86/kernel/cpu/mtrr/main.c b/trunk/arch/x86/kernel/cpu/mtrr/main.c index 8fc248b5aeaf..03cda01f57c7 100644 --- 
a/trunk/arch/x86/kernel/cpu/mtrr/main.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/main.c @@ -104,7 +104,7 @@ static void __init set_num_var_ranges(void) unsigned long config = 0, dummy; if (use_intel()) { - rdmsr(MSR_MTRRcap, config, dummy); + rdmsr(MTRRcap_MSR, config, dummy); } else if (is_cpu(AMD)) config = 2; else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) diff --git a/trunk/arch/x86/kernel/cpu/mtrr/mtrr.h b/trunk/arch/x86/kernel/cpu/mtrr/mtrr.h index 7538b767f206..77f67f7b347a 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/trunk/arch/x86/kernel/cpu/mtrr/mtrr.h @@ -5,6 +5,21 @@ #include #include +#define MTRRcap_MSR 0x0fe +#define MTRRdefType_MSR 0x2ff + +#define MTRRfix64K_00000_MSR 0x250 +#define MTRRfix16K_80000_MSR 0x258 +#define MTRRfix16K_A0000_MSR 0x259 +#define MTRRfix4K_C0000_MSR 0x268 +#define MTRRfix4K_C8000_MSR 0x269 +#define MTRRfix4K_D0000_MSR 0x26a +#define MTRRfix4K_D8000_MSR 0x26b +#define MTRRfix4K_E0000_MSR 0x26c +#define MTRRfix4K_E8000_MSR 0x26d +#define MTRRfix4K_F0000_MSR 0x26e +#define MTRRfix4K_F8000_MSR 0x26f + #define MTRR_CHANGE_MASK_FIXED 0x01 #define MTRR_CHANGE_MASK_VARIABLE 0x02 #define MTRR_CHANGE_MASK_DEFTYPE 0x04 diff --git a/trunk/arch/x86/kernel/cpu/mtrr/state.c b/trunk/arch/x86/kernel/cpu/mtrr/state.c index 1f5fb1588d1f..7f7e2753685b 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/state.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/state.c @@ -35,7 +35,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) if (use_intel()) /* Save MTRR state */ - rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); + rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); else /* Cyrix ARRs - everything else were excluded at the top */ ctxt->ccr3 = getCx86(CX86_CCR3); @@ -46,7 +46,7 @@ void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) { if (use_intel()) /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, + mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi); else if (is_cpu(CYRIX)) /* Cyrix ARRs - everything else were excluded at the top */ @@ -64,7 +64,7 @@ void set_mtrr_done(struct set_mtrr_context *ctxt) /* Restore MTRRdefType */ if (use_intel()) /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); else /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, ctxt->ccr3); diff --git a/trunk/arch/x86/kernel/dumpstack.h b/trunk/arch/x86/kernel/dumpstack.h index 81086c227ab7..da87590b8698 100644 --- a/trunk/arch/x86/kernel/dumpstack.h +++ b/trunk/arch/x86/kernel/dumpstack.h @@ -29,6 +29,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl); extern unsigned int code_bytes; +extern int kstack_depth_to_print; /* The form of the top of the frame on the stack */ struct stack_frame { diff --git a/trunk/arch/x86/kernel/e820.c b/trunk/arch/x86/kernel/e820.c index 7271fa33d791..006281302925 100644 --- a/trunk/arch/x86/kernel/e820.c +++ b/trunk/arch/x86/kernel/e820.c @@ -617,7 +617,7 @@ __init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, */ __init void e820_setup_gap(void) { - unsigned long gapstart, gapsize; + unsigned long gapstart, gapsize, round; int found; gapstart = 0x10000000; @@ -635,9 +635,14 @@ __init void e820_setup_gap(void) #endif /* - * e820_reserve_resources_late protect stolen RAM already + * See how much we want to round up: start 
off with + * rounding to the next 1MB area. */ - pci_mem_start = gapstart; + round = 0x100000; + while ((gapsize >> 4) > round) + round += round; + /* Fun with two's complement */ + pci_mem_start = (gapstart + round) & -round; printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", @@ -1366,23 +1371,6 @@ void __init e820_reserve_resources(void) } } -/* How much should we pad RAM ending depending on where it is? */ -static unsigned long ram_alignment(resource_size_t pos) -{ - unsigned long mb = pos >> 20; - - /* To 64kB in the first megabyte */ - if (!mb) - return 64*1024; - - /* To 1MB in the first 16MB */ - if (mb < 16) - return 1024*1024; - - /* To 32MB for anything above that */ - return 32*1024*1024; -} - void __init e820_reserve_resources_late(void) { int i; @@ -1394,24 +1382,6 @@ void __init e820_reserve_resources_late(void) insert_resource_expand_to_fit(&iomem_resource, res); res++; } - - /* - * Try to bump up RAM regions to reasonable boundaries to - * avoid stolen RAM: - */ - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *entry = &e820_saved.map[i]; - resource_size_t start, end; - - if (entry->type != E820_RAM) - continue; - start = entry->addr + entry->size; - end = round_up(start, ram_alignment(start)); - if (start == end) - continue; - reserve_region_with_split(&iomem_resource, start, - end - 1, "RAM buffer"); - } } char *__init default_machine_specific_memory_setup(void) diff --git a/trunk/arch/x86/kernel/early-quirks.c b/trunk/arch/x86/kernel/early-quirks.c index ebdb85cf2686..76b8cd953dee 100644 --- a/trunk/arch/x86/kernel/early-quirks.c +++ b/trunk/arch/x86/kernel/early-quirks.c @@ -96,7 +96,6 @@ static void __init nvidia_bugs(int num, int slot, int func) } -#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) #if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC) static u32 __init ati_ixp4x0_rev(int num, int slot, int func) { @@ -115,7 +114,6 @@ static u32 __init ati_ixp4x0_rev(int num, int slot, int func) d &= 0xff; return d; } -#endif static void __init ati_bugs(int num, int slot, int func) { diff --git a/trunk/arch/x86/kernel/head_32.S b/trunk/arch/x86/kernel/head_32.S index dc5ed4bdd88d..30683883e0cd 100644 --- a/trunk/arch/x86/kernel/head_32.S +++ b/trunk/arch/x86/kernel/head_32.S @@ -608,6 +608,13 @@ ignore_int: ENTRY(initial_code) .long i386_start_kernel +.section .text +/* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + /* * BSS section */ diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 9a391bbb8ba8..c3fe010d74c8 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -12,7 +12,6 @@ #include #include #include -#include atomic_t irq_err_count; @@ -25,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL; */ void ack_bad_irq(unsigned int irq) { - if (printk_ratelimit()) - pr_err("unexpected IRQ trap at vector %02x\n", irq); + printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); +#ifdef CONFIG_X86_LOCAL_APIC /* * Currently unexpected vectors happen only on SMP and APIC. * We _must_ ack these because every local APIC has only N @@ -37,7 +36,9 @@ void ack_bad_irq(unsigned int irq) * completely. 
* But only ack when the APIC is enabled -AK */ - ack_APIC_irq(); + if (cpu_has_apic) + ack_APIC_irq(); +#endif } #define irq_stats(x) (&per_cpu(irq_stat, x)) @@ -177,7 +178,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->irq_thermal_count; # ifdef CONFIG_X86_64 sum += irq_stats(cpu)->irq_threshold_count; -# endif +#endif #endif return sum; } @@ -212,11 +213,14 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) irq = __get_cpu_var(vector_irq)[vector]; if (!handle_irq(irq, regs)) { - ack_APIC_irq(); +#ifdef CONFIG_X86_64 + if (!disable_apic) + ack_APIC_irq(); +#endif if (printk_ratelimit()) - pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", - __func__, smp_processor_id(), vector, irq); + printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n", + __func__, smp_processor_id(), vector, irq); } irq_exit(); diff --git a/trunk/arch/x86/kernel/irqinit.c b/trunk/arch/x86/kernel/irqinit_32.c similarity index 68% rename from trunk/arch/x86/kernel/irqinit.c rename to trunk/arch/x86/kernel/irqinit_32.c index 2e08b10ad51a..368b0a8836f9 100644 --- a/trunk/arch/x86/kernel/irqinit.c +++ b/trunk/arch/x86/kernel/irqinit_32.c @@ -1,25 +1,20 @@ -#include #include #include #include #include #include -#include #include #include -#include #include #include #include #include -#include #include #include #include #include #include -#include #include #include #include @@ -27,23 +22,7 @@ #include #include -/* - * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: - * (these are usually mapped to vectors 0x30-0x3f) - */ - -/* - * The IO-APIC gives us many more interrupt sources. Most of these - * are unused but an SMP system is supposed to have enough memory ... - * sometimes (mostly wrt. hw bugs) we get corrupted vectors all - * across the spectrum, so we really want to be prepared to get all - * of these. Plus, more powerful systems might have more than 64 - * IO-APIC registers. - * - * (these are usually mapped into the 0x30-0xff vector range) - */ -#ifdef CONFIG_X86_32 /* * Note that on a 486, we don't want to do a SIGFPE on an irq13 * as the irq is unreliable, and exception 16 works correctly @@ -73,7 +52,30 @@ static struct irqaction fpu_irq = { .handler = math_error_irq, .name = "fpu", }; + +void __init init_ISA_irqs(void) +{ + int i; + +#ifdef CONFIG_X86_LOCAL_APIC + init_bsp_APIC(); #endif + init_8259A(0); + + /* + * 16 old-style INTA-cycle interrupts: + */ + for (i = 0; i < NR_IRQS_LEGACY; i++) { + struct irq_desc *desc = irq_to_desc(i); + + desc->status = IRQ_DISABLED; + desc->action = NULL; + desc->depth = 1; + + set_irq_chip_and_handler_name(i, &i8259A_chip, + handle_level_irq, "XT"); + } +} /* * IRQ2 is cascade interrupt to second interrupt controller @@ -116,37 +118,29 @@ int vector_used_by_percpu_irq(unsigned int vector) return 0; } -static void __init init_ISA_irqs(void) +/* Overridden in paravirt.c */ +void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); + +void __init native_init_IRQ(void) { int i; -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) - init_bsp_APIC(); -#endif - init_8259A(0); + /* Execute any quirks before the call gates are initialised: */ + x86_quirk_pre_intr_init(); /* - * 16 old-style INTA-cycle interrupts: + * Cover the whole vector space, no vector can escape + * us. 
(some of these will be overridden and become + * 'special' SMP interrupts) */ - for (i = 0; i < NR_IRQS_LEGACY; i++) { - struct irq_desc *desc = irq_to_desc(i); - - desc->status = IRQ_DISABLED; - desc->action = NULL; - desc->depth = 1; - - set_irq_chip_and_handler_name(i, &i8259A_chip, - handle_level_irq, "XT"); + for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { + /* SYSCALL_VECTOR was reserved in trap_init. */ + if (i != SYSCALL_VECTOR) + set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); } -} -/* Overridden in paravirt.c */ -void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); -static void __init smp_intr_init(void) -{ -#ifdef CONFIG_SMP -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) +#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) /* * The reschedule interrupt is a CPU-to-CPU reschedule-helper * IPI, driven by wakeup. @@ -166,27 +160,16 @@ static void __init smp_intr_init(void) /* IPI for generic function call */ alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); - /* IPI for generic single function call */ + /* IPI for single call function */ alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, - call_function_single_interrupt); + call_function_single_interrupt); /* Low priority IPI to cleanup after moving an irq */ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); #endif -#endif /* CONFIG_SMP */ -} - -static void __init apic_intr_init(void) -{ - smp_intr_init(); - -#ifdef CONFIG_X86_64 - alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); - alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); -#endif -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) +#ifdef CONFIG_X86_LOCAL_APIC /* self generated IPI for local APIC timer */ alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); @@ -196,67 +179,16 @@ static void __init apic_intr_init(void) /* IPI vectors for APIC spurious and error interrupts */ alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); - - /* Performance monitoring interrupts: */ -# ifdef CONFIG_PERF_COUNTERS - alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); - alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); -# endif - #endif -#ifdef CONFIG_X86_32 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) /* thermal monitor LVT interrupt */ alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); #endif -#endif -} - -/** - * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors - * - * Description: - * Perform any necessary interrupt initialisation prior to setting up - * the "ordinary" interrupt call gates. For legacy reasons, the ISA - * interrupts should be initialised here if the machine emulates a PC - * in any way. - **/ -static void __init x86_quirk_pre_intr_init(void) -{ -#ifdef CONFIG_X86_32 - if (x86_quirks->arch_pre_intr_init) { - if (x86_quirks->arch_pre_intr_init()) - return; - } -#endif - init_ISA_irqs(); -} - -void __init native_init_IRQ(void) -{ - int i; - - /* Execute any quirks before the call gates are initialised: */ - x86_quirk_pre_intr_init(); - - apic_intr_init(); - - /* - * Cover the whole vector space, no vector can escape - * us. (some of these will be overridden and become - * 'special' SMP interrupts) - */ - for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { - /* IA32_SYSCALL_VECTOR could be used in trap_init already. 
*/ - if (!test_bit(i, used_vectors)) - set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); - } if (!acpi_ioapic) setup_irq(2, &irq2); -#ifdef CONFIG_X86_32 /* * Call quirks after call gates are initialised (usually add in * the architecture specific gates): @@ -271,5 +203,4 @@ void __init native_init_IRQ(void) setup_irq(FPU_IRQ, &fpu_irq); irq_ctx_init(smp_processor_id()); -#endif } diff --git a/trunk/arch/x86/kernel/irqinit_64.c b/trunk/arch/x86/kernel/irqinit_64.c new file mode 100644 index 000000000000..8cd10537fd46 --- /dev/null +++ b/trunk/arch/x86/kernel/irqinit_64.c @@ -0,0 +1,177 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: + * (these are usually mapped to vectors 0x30-0x3f) + */ + +/* + * The IO-APIC gives us many more interrupt sources. Most of these + * are unused but an SMP system is supposed to have enough memory ... + * sometimes (mostly wrt. hw bugs) we get corrupted vectors all + * across the spectrum, so we really want to be prepared to get all + * of these. Plus, more powerful systems might have more than 64 + * IO-APIC registers. + * + * (these are usually mapped into the 0x30-0xff vector range) + */ + +/* + * IRQ2 is cascade interrupt to second interrupt controller + */ + +static struct irqaction irq2 = { + .handler = no_action, + .name = "cascade", +}; +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... IRQ0_VECTOR - 1] = -1, + [IRQ0_VECTOR] = 0, + [IRQ1_VECTOR] = 1, + [IRQ2_VECTOR] = 2, + [IRQ3_VECTOR] = 3, + [IRQ4_VECTOR] = 4, + [IRQ5_VECTOR] = 5, + [IRQ6_VECTOR] = 6, + [IRQ7_VECTOR] = 7, + [IRQ8_VECTOR] = 8, + [IRQ9_VECTOR] = 9, + [IRQ10_VECTOR] = 10, + [IRQ11_VECTOR] = 11, + [IRQ12_VECTOR] = 12, + [IRQ13_VECTOR] = 13, + [IRQ14_VECTOR] = 14, + [IRQ15_VECTOR] = 15, + [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 +}; + +int vector_used_by_percpu_irq(unsigned int vector) +{ + int cpu; + + for_each_online_cpu(cpu) { + if (per_cpu(vector_irq, cpu)[vector] != -1) + return 1; + } + + return 0; +} + +static void __init init_ISA_irqs(void) +{ + int i; + + init_bsp_APIC(); + init_8259A(0); + + for (i = 0; i < NR_IRQS_LEGACY; i++) { + struct irq_desc *desc = irq_to_desc(i); + + desc->status = IRQ_DISABLED; + desc->action = NULL; + desc->depth = 1; + + /* + * 16 old-style INTA-cycle interrupts: + */ + set_irq_chip_and_handler_name(i, &i8259A_chip, + handle_level_irq, "XT"); + } +} + +void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); + +static void __init smp_intr_init(void) +{ +#ifdef CONFIG_SMP + /* + * The reschedule interrupt is a CPU-to-CPU reschedule-helper + * IPI, driven by wakeup. 
+ */ + alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt); + + /* IPIs for invalidation */ + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6); + alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7); + + /* IPI for generic function call */ + alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); + + /* IPI for generic single function call */ + alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); + + /* Low priority IPI to cleanup after moving an irq */ + set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); +#endif +} + +static void __init apic_intr_init(void) +{ + smp_intr_init(); + + alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); + alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); + + /* self generated IPI for local APIC timer */ + alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); + + /* generic IPI for platform specific use */ + alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt); + + /* IPI vectors for APIC spurious and error interrupts */ + alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); + alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); +} + +void __init native_init_IRQ(void) +{ + int i; + + init_ISA_irqs(); + /* + * Cover the whole vector space, no vector can escape + * us. (some of these will be overridden and become + * 'special' SMP interrupts) + */ + for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { + int vector = FIRST_EXTERNAL_VECTOR + i; + if (vector != IA32_SYSCALL_VECTOR) + set_intr_gate(vector, interrupt[i]); + } + + apic_intr_init(); + + if (!acpi_ioapic) + setup_irq(2, &irq2); +} diff --git a/trunk/arch/x86/kernel/kgdb.c b/trunk/arch/x86/kernel/kgdb.c index 8d82a77a3f3b..b1f4dffb919e 100644 --- a/trunk/arch/x86/kernel/kgdb.c +++ b/trunk/arch/x86/kernel/kgdb.c @@ -142,7 +142,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); gdb_regs32[GDB_CS] = __KERNEL_CS; gdb_regs32[GDB_SS] = __KERNEL_DS; - gdb_regs[GDB_PC] = 0; + gdb_regs[GDB_PC] = p->thread.ip; gdb_regs[GDB_R8] = 0; gdb_regs[GDB_R9] = 0; gdb_regs[GDB_R10] = 0; diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c index 366baa179913..453b5795a5c6 100644 --- a/trunk/arch/x86/kernel/microcode_amd.c +++ b/trunk/arch/x86/kernel/microcode_amd.c @@ -13,13 +13,25 @@ * Licensed under the terms of the GNU General Public * License version 2. See file COPYING for details. 
*/ +#include +#include +#include #include +#include +#include #include #include #include #include #include +#include +#include +#include +#include +#include #include +#include +#include #include #include @@ -67,6 +79,9 @@ struct microcode_amd { #define UCODE_CONTAINER_SECTION_HDR 8 #define UCODE_CONTAINER_HEADER_SIZE 12 +/* serialize access to the physical write */ +static DEFINE_SPINLOCK(microcode_update_lock); + static struct equiv_cpu_entry *equiv_cpu_table; static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) @@ -129,8 +144,9 @@ static int get_matching_microcode(int cpu, void *mc, int rev) return 1; } -static int apply_microcode_amd(int cpu) +static void apply_microcode_amd(int cpu) { + unsigned long flags; u32 rev, dummy; int cpu_num = raw_smp_processor_id(); struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; @@ -140,25 +156,25 @@ static int apply_microcode_amd(int cpu) BUG_ON(cpu_num != cpu); if (mc_amd == NULL) - return 0; + return; + spin_lock_irqsave(µcode_update_lock, flags); wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); /* get patch id after patching */ rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + spin_unlock_irqrestore(µcode_update_lock, flags); /* check current patch id and patch's id for match */ if (rev != mc_amd->hdr.patch_id) { printk(KERN_ERR "microcode: CPU%d: update failed " "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id); - return -1; + return; } printk(KERN_INFO "microcode: CPU%d: updated (new patch_level=0x%x)\n", cpu, rev); uci->cpu_sig.rev = rev; - - return 0; } static int get_ucode_data(void *to, const u8 *from, size_t n) @@ -241,12 +257,13 @@ static int install_equiv_cpu_table(const u8 *buf) static void free_equiv_cpu_table(void) { - vfree(equiv_cpu_table); - equiv_cpu_table = NULL; + if (equiv_cpu_table) { + vfree(equiv_cpu_table); + equiv_cpu_table = NULL; + } } -static enum ucode_state -generic_load_microcode(int cpu, const u8 *data, size_t size) +static int generic_load_microcode(int cpu, const u8 *data, size_t size) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; const u8 *ucode_ptr = data; @@ -255,13 +272,12 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) int new_rev = uci->cpu_sig.rev; unsigned int leftover; unsigned long offset; - enum ucode_state state = UCODE_OK; offset = install_equiv_cpu_table(ucode_ptr); if (!offset) { printk(KERN_ERR "microcode: failed to create " "equivalent cpu table\n"); - return UCODE_ERROR; + return -EINVAL; } ucode_ptr += offset; @@ -277,7 +293,8 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) mc_header = (struct microcode_header_amd *)mc; if (get_matching_microcode(cpu, mc, new_rev)) { - vfree(new_mc); + if (new_mc) + vfree(new_mc); new_rev = mc_header->patch_id; new_mc = mc; } else @@ -289,32 +306,34 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) if (new_mc) { if (!leftover) { - vfree(uci->mc); + if (uci->mc) + vfree(uci->mc); uci->mc = new_mc; pr_debug("microcode: CPU%d found a matching microcode " "update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); - } else { + } else vfree(new_mc); - state = UCODE_ERROR; - } - } else - state = UCODE_NFOUND; + } free_equiv_cpu_table(); - return state; + return (int)leftover; } -static enum ucode_state request_microcode_fw(int cpu, struct device *device) +static int request_microcode_fw(int cpu, struct device *device) { const char *fw_name = "amd-ucode/microcode_amd.bin"; const struct firmware *firmware; - enum ucode_state ret; + int ret; + + /* We should bind the 
task to the CPU */ + BUG_ON(cpu != raw_smp_processor_id()); - if (request_firmware(&firmware, fw_name, device)) { + ret = request_firmware(&firmware, fw_name, device); + if (ret) { printk(KERN_ERR "microcode: failed to load file %s\n", fw_name); - return UCODE_NFOUND; + return ret; } ret = generic_load_microcode(cpu, firmware->data, firmware->size); @@ -324,12 +343,11 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) return ret; } -static enum ucode_state -request_microcode_user(int cpu, const void __user *buf, size_t size) +static int request_microcode_user(int cpu, const void __user *buf, size_t size) { printk(KERN_INFO "microcode: AMD microcode update via " "/dev/cpu/microcode not supported\n"); - return UCODE_ERROR; + return -1; } static void microcode_fini_cpu_amd(int cpu) diff --git a/trunk/arch/x86/kernel/microcode_core.c b/trunk/arch/x86/kernel/microcode_core.c index 9c4461501fcb..98c470c069d1 100644 --- a/trunk/arch/x86/kernel/microcode_core.c +++ b/trunk/arch/x86/kernel/microcode_core.c @@ -71,18 +71,27 @@ * Thanks to Stuart Swales for pointing out this bug. */ #include -#include #include +#include +#include #include +#include +#include +#include +#include #include #include #include +#include +#include +#include #include #include #include #include #include +#include MODULE_DESCRIPTION("Microcode Update Driver"); MODULE_AUTHOR("Tigran Aivazian "); @@ -92,110 +101,36 @@ MODULE_LICENSE("GPL"); static struct microcode_ops *microcode_ops; -/* - * Synchronization. - * - * All non cpu-hotplug-callback call sites use: - * - * - microcode_mutex to synchronize with each other; - * - get/put_online_cpus() to synchronize with - * the cpu-hotplug-callback call sites. - * - * We guarantee that only a single cpu is being - * updated at any particular moment of time. 
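apply_microcode_amd() above reads the active patch level back with rdmsr(MSR_AMD64_PATCH_LEVEL, ...), and the Intel driver further down does the same through MSR_IA32_UCODE_REV; both patch-level registers sit at MSR index 0x8b. With the msr module loaded, a rough userspace peek at that register looks like the sketch below (CPU number and path are illustrative; on Intel parts the kernel's wrmsr/sync_core sequence is needed for a trustworthy value, so treat this only as a sanity check):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Requires root and the msr module (modprobe msr). */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	uint64_t val;

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The pread() offset selects the MSR index; 0x8b is the microcode
	 * revision register the driver reads after patching. */
	if (pread(fd, &val, sizeof(val), 0x8b) != (ssize_t)sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("MSR 0x8b = 0x%016llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}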
- */ +/* no concurrent ->write()s are allowed on /dev/cpu/microcode */ static DEFINE_MUTEX(microcode_mutex); struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; EXPORT_SYMBOL_GPL(ucode_cpu_info); -/* - * Operations that are run on a target cpu: - */ - -struct cpu_info_ctx { - struct cpu_signature *cpu_sig; - int err; -}; - -static void collect_cpu_info_local(void *arg) -{ - struct cpu_info_ctx *ctx = arg; - - ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(), - ctx->cpu_sig); -} - -static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) -{ - struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 }; - int ret; - - ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); - if (!ret) - ret = ctx.err; - - return ret; -} - -static int collect_cpu_info(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - int ret; - - memset(uci, 0, sizeof(*uci)); - - ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); - if (!ret) - uci->valid = 1; - - return ret; -} - -struct apply_microcode_ctx { - int err; -}; - -static void apply_microcode_local(void *arg) -{ - struct apply_microcode_ctx *ctx = arg; - - ctx->err = microcode_ops->apply_microcode(smp_processor_id()); -} - -static int apply_microcode_on_target(int cpu) -{ - struct apply_microcode_ctx ctx = { .err = 0 }; - int ret; - - ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); - if (!ret) - ret = ctx.err; - - return ret; -} - #ifdef CONFIG_MICROCODE_OLD_INTERFACE static int do_microcode_update(const void __user *buf, size_t size) { + cpumask_t old; int error = 0; int cpu; + old = current->cpus_allowed; + for_each_online_cpu(cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - enum ucode_state ustate; if (!uci->valid) continue; - ustate = microcode_ops->request_microcode_user(cpu, buf, size); - if (ustate == UCODE_ERROR) { - error = -1; - break; - } else if (ustate == UCODE_OK) - apply_microcode_on_target(cpu); + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + error = microcode_ops->request_microcode_user(cpu, buf, size); + if (error < 0) + goto out; + if (!error) + microcode_ops->apply_microcode(cpu); } - +out: + set_cpus_allowed_ptr(current, &old); return error; } @@ -208,17 +143,19 @@ static int microcode_open(struct inode *unused1, struct file *unused2) static ssize_t microcode_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { - ssize_t ret = -EINVAL; + ssize_t ret; if ((len >> PAGE_SHIFT) > num_physpages) { - pr_err("microcode: too much data (max %ld pages)\n", num_physpages); - return ret; + printk(KERN_ERR "microcode: too much data (max %ld pages)\n", + num_physpages); + return -EINVAL; } get_online_cpus(); mutex_lock(µcode_mutex); - if (do_microcode_update(buf, len) == 0) + ret = do_microcode_update(buf, len); + if (!ret) ret = (ssize_t)len; mutex_unlock(µcode_mutex); @@ -228,15 +165,15 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, } static const struct file_operations microcode_fops = { - .owner = THIS_MODULE, - .write = microcode_write, - .open = microcode_open, + .owner = THIS_MODULE, + .write = microcode_write, + .open = microcode_open, }; static struct miscdevice microcode_dev = { - .minor = MICROCODE_MINOR, - .name = "microcode", - .fops = µcode_fops, + .minor = MICROCODE_MINOR, + .name = "microcode", + .fops = µcode_fops, }; static int __init microcode_dev_init(void) @@ -245,7 +182,9 @@ static int __init microcode_dev_init(void) error = misc_register(µcode_dev); if (error) { - 
pr_err("microcode: can't misc_register on minor=%d\n", MICROCODE_MINOR); + printk(KERN_ERR + "microcode: can't misc_register on minor=%d\n", + MICROCODE_MINOR); return error; } @@ -266,51 +205,42 @@ MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); /* fake device for request_firmware */ static struct platform_device *microcode_pdev; -static int reload_for_cpu(int cpu) +static long reload_for_cpu(void *unused) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); int err = 0; mutex_lock(µcode_mutex); if (uci->valid) { - enum ucode_state ustate; - - ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); - if (ustate == UCODE_OK) - apply_microcode_on_target(cpu); - else - if (ustate == UCODE_ERROR) - err = -EINVAL; + err = microcode_ops->request_microcode_fw(smp_processor_id(), + µcode_pdev->dev); + if (!err) + microcode_ops->apply_microcode(smp_processor_id()); } mutex_unlock(µcode_mutex); - return err; } static ssize_t reload_store(struct sys_device *dev, struct sysdev_attribute *attr, - const char *buf, size_t size) + const char *buf, size_t sz) { - unsigned long val; - int cpu = dev->id; - int ret = 0; char *end; + unsigned long val = simple_strtoul(buf, &end, 0); + int err = 0; + int cpu = dev->id; - val = simple_strtoul(buf, &end, 0); if (end == buf) return -EINVAL; - if (val == 1) { get_online_cpus(); if (cpu_online(cpu)) - ret = reload_for_cpu(cpu); + err = work_on_cpu(cpu, reload_for_cpu, NULL); put_online_cpus(); } - - if (!ret) - ret = size; - - return ret; + if (err) + return err; + return sz; } static ssize_t version_show(struct sys_device *dev, @@ -341,11 +271,11 @@ static struct attribute *mc_default_attrs[] = { }; static struct attribute_group mc_attr_group = { - .attrs = mc_default_attrs, - .name = "microcode", + .attrs = mc_default_attrs, + .name = "microcode", }; -static void microcode_fini_cpu(int cpu) +static void __microcode_fini_cpu(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; @@ -353,68 +283,103 @@ static void microcode_fini_cpu(int cpu) uci->valid = 0; } -static enum ucode_state microcode_resume_cpu(int cpu) +static void microcode_fini_cpu(int cpu) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - if (!uci->mc) - return UCODE_NFOUND; + mutex_lock(µcode_mutex); + __microcode_fini_cpu(cpu); + mutex_unlock(µcode_mutex); +} - pr_debug("microcode: CPU%d updated upon resume\n", cpu); - apply_microcode_on_target(cpu); +static void collect_cpu_info(int cpu) +{ + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - return UCODE_OK; + memset(uci, 0, sizeof(*uci)); + if (!microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig)) + uci->valid = 1; } -static enum ucode_state microcode_init_cpu(int cpu) +static int microcode_resume_cpu(int cpu) { - enum ucode_state ustate; + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct cpu_signature nsig; - if (collect_cpu_info(cpu)) - return UCODE_ERROR; + pr_debug("microcode: CPU%d resumed\n", cpu); - /* --dimm. Trigger a delayed update? 
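reload_store() above is wired into the per-cpu sysfs group named "microcode", so writing the value 1 to its reload attribute kicks off reload_for_cpu() via work_on_cpu(). A small sketch of poking that attribute from userspace, assuming the conventional sysfs path for cpu0 (path is an assumption of the example; needs root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "microcode" is the attribute group name, "reload" the attribute;
	 * anything that does not parse to 1 is ignored by reload_store(). */
	const char *path = "/sys/devices/system/cpu/cpu0/microcode/reload";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}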
*/ - if (system_state != SYSTEM_RUNNING) - return UCODE_NFOUND; + if (!uci->mc) + return 1; - ustate = microcode_ops->request_microcode_fw(cpu, µcode_pdev->dev); + /* + * Let's verify that the 'cached' ucode does belong + * to this cpu (a bit of paranoia): + */ + if (microcode_ops->collect_cpu_info(cpu, &nsig)) { + __microcode_fini_cpu(cpu); + printk(KERN_ERR "failed to collect_cpu_info for resuming cpu #%d\n", + cpu); + return -1; + } - if (ustate == UCODE_OK) { - pr_debug("microcode: CPU%d updated upon init\n", cpu); - apply_microcode_on_target(cpu); + if ((nsig.sig != uci->cpu_sig.sig) || (nsig.pf != uci->cpu_sig.pf)) { + __microcode_fini_cpu(cpu); + printk(KERN_ERR "cached ucode doesn't match the resuming cpu #%d\n", + cpu); + /* Should we look for a new ucode here? */ + return 1; } - return ustate; + return 0; } -static enum ucode_state microcode_update_cpu(int cpu) +static long microcode_update_cpu(void *unused) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - enum ucode_state ustate; + struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); + int err = 0; - if (uci->valid) - ustate = microcode_resume_cpu(cpu); - else - ustate = microcode_init_cpu(cpu); + /* + * Check if the system resume is in progress (uci->valid != NULL), + * otherwise just request a firmware: + */ + if (uci->valid) { + err = microcode_resume_cpu(smp_processor_id()); + } else { + collect_cpu_info(smp_processor_id()); + if (uci->valid && system_state == SYSTEM_RUNNING) + err = microcode_ops->request_microcode_fw( + smp_processor_id(), + µcode_pdev->dev); + } + if (!err) + microcode_ops->apply_microcode(smp_processor_id()); + return err; +} - return ustate; +static int microcode_init_cpu(int cpu) +{ + int err; + mutex_lock(µcode_mutex); + err = work_on_cpu(cpu, microcode_update_cpu, NULL); + mutex_unlock(µcode_mutex); + + return err; } static int mc_sysdev_add(struct sys_device *sys_dev) { int err, cpu = sys_dev->id; + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; if (!cpu_online(cpu)) return 0; pr_debug("microcode: CPU%d added\n", cpu); + memset(uci, 0, sizeof(*uci)); err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); if (err) return err; - if (microcode_init_cpu(cpu) == UCODE_ERROR) - err = -EINVAL; + err = microcode_init_cpu(cpu); return err; } @@ -435,30 +400,19 @@ static int mc_sysdev_remove(struct sys_device *sys_dev) static int mc_sysdev_resume(struct sys_device *dev) { int cpu = dev->id; - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; if (!cpu_online(cpu)) return 0; - /* - * All non-bootup cpus are still disabled, - * so only CPU 0 will apply ucode here. - * - * Moreover, there can be no concurrent - * updates from any other places at this point. 
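A recurring constraint in this driver is that the MSR accesses must happen on the CPU being updated, hence the BUG_ON(cpu != raw_smp_processor_id()) checks, the set_cpus_allowed_ptr() dance in do_microcode_update() and the work_on_cpu() calls here. The userspace equivalent of that save/pin/restore pattern, sketched with sched_setaffinity() (function names and the chosen CPU are illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Run fn on a chosen CPU by saving, narrowing and restoring the affinity
 * mask, the same dance the old do_microcode_update() performs with
 * set_cpus_allowed_ptr(). */
static int run_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpu_set_t old, pin;

	if (sched_getaffinity(0, sizeof(old), &old))
		return -1;
	CPU_ZERO(&pin);
	CPU_SET(cpu, &pin);
	if (sched_setaffinity(0, sizeof(pin), &pin))
		return -1;
	fn(arg);                                /* now running on 'cpu' */
	return sched_setaffinity(0, sizeof(old), &old);
}

static void report(void *arg)
{
	(void)arg;
	printf("running on CPU %d\n", sched_getcpu());
}

int main(void)
{
	return run_on_cpu(0, report, NULL) ? 1 : 0;
}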
- */ - WARN_ON(cpu != 0); - - if (uci->valid && uci->mc) - microcode_ops->apply_microcode(cpu); - + /* only CPU 0 will apply ucode here */ + microcode_update_cpu(NULL); return 0; } static struct sysdev_driver mc_sysdev_driver = { - .add = mc_sysdev_add, - .remove = mc_sysdev_remove, - .resume = mc_sysdev_resume, + .add = mc_sysdev_add, + .remove = mc_sysdev_remove, + .resume = mc_sysdev_resume, }; static __cpuinit int @@ -471,12 +425,15 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - microcode_update_cpu(cpu); + if (microcode_init_cpu(cpu)) + printk(KERN_ERR "microcode: failed to init CPU%d\n", + cpu); case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: pr_debug("microcode: CPU%d added\n", cpu); if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) - pr_err("microcode: Failed to create group for CPU%d\n", cpu); + printk(KERN_ERR "microcode: Failed to create the sysfs " + "group for CPU%d\n", cpu); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: @@ -508,10 +465,13 @@ static int __init microcode_init(void) microcode_ops = init_amd_microcode(); if (!microcode_ops) { - pr_err("microcode: no support for this CPU vendor\n"); + printk(KERN_ERR "microcode: no support for this CPU vendor\n"); return -ENODEV; } + error = microcode_dev_init(); + if (error) + return error; microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); if (IS_ERR(microcode_pdev)) { @@ -520,31 +480,23 @@ static int __init microcode_init(void) } get_online_cpus(); - mutex_lock(µcode_mutex); - error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver); - - mutex_unlock(µcode_mutex); put_online_cpus(); - if (error) { + microcode_dev_exit(); platform_device_unregister(microcode_pdev); return error; } - error = microcode_dev_init(); - if (error) - return error; - register_hotcpu_notifier(&mc_cpu_notifier); - pr_info("Microcode Update Driver: v" MICROCODE_VERSION + printk(KERN_INFO + "Microcode Update Driver: v" MICROCODE_VERSION " ," " Peter Oruba\n"); return 0; } -module_init(microcode_init); static void __exit microcode_exit(void) { @@ -553,17 +505,16 @@ static void __exit microcode_exit(void) unregister_hotcpu_notifier(&mc_cpu_notifier); get_online_cpus(); - mutex_lock(µcode_mutex); - sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); - - mutex_unlock(µcode_mutex); put_online_cpus(); platform_device_unregister(microcode_pdev); microcode_ops = NULL; - pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); + printk(KERN_INFO + "Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); } + +module_init(microcode_init); module_exit(microcode_exit); diff --git a/trunk/arch/x86/kernel/microcode_intel.c b/trunk/arch/x86/kernel/microcode_intel.c index 0d334ddd0a96..149b9ec7c1ab 100644 --- a/trunk/arch/x86/kernel/microcode_intel.c +++ b/trunk/arch/x86/kernel/microcode_intel.c @@ -70,11 +70,24 @@ * Fix sigmatch() macro to handle old CPUs with pf == 0. * Thanks to Stuart Swales for pointing out this bug. 
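The collect_cpu_info() routines in these drivers (the Intel one follows just below) fill cpu_sig.sig from CPUID leaf 1, which microcode_resume_cpu() above compares against the cached copy as a paranoia check. A userspace approximation using the compiler's cpuid.h helper, with a deliberately simplified family/model/stepping decode (the kernel folds in the extended fields only for the relevant families, so take this as a rough illustration):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "cpuid leaf 1 not supported\n");
		return 1;
	}
	/* eax of leaf 1 is the raw processor signature; the family, model
	 * and stepping fields are packed into it. */
	unsigned int stepping = eax & 0xf;
	unsigned int model    = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);
	unsigned int family   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	printf("sig=0x%08x family=0x%x model=0x%x stepping=0x%x\n",
	       eax, family, model, stepping);
	return 0;
}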
*/ +#include +#include +#include #include +#include +#include +#include #include +#include #include #include -#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -137,9 +150,13 @@ struct extended_sigtable { #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) +/* serialize access to the physical write to MSR 0x79 */ +static DEFINE_SPINLOCK(microcode_update_lock); + static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu_num); + unsigned long flags; unsigned int val[2]; memset(csig, 0, sizeof(*csig)); @@ -159,14 +176,18 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) csig->pf = 1 << ((val[1] >> 18) & 7); } + /* serialize access to the physical write to MSR 0x79 */ + spin_lock_irqsave(µcode_update_lock, flags); + wrmsr(MSR_IA32_UCODE_REV, 0, 0); /* see notes above for revision 1.07. Apparent chip bug */ sync_core(); /* get the current revision from MSR 0x8B */ rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); + spin_unlock_irqrestore(µcode_update_lock, flags); - printk(KERN_INFO "microcode: CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", - cpu_num, csig->sig, csig->pf, csig->rev); + pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", + csig->sig, csig->pf, csig->rev); return 0; } @@ -297,10 +318,11 @@ get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev) return 0; } -static int apply_microcode(int cpu) +static void apply_microcode(int cpu) { struct microcode_intel *mc_intel; struct ucode_cpu_info *uci; + unsigned long flags; unsigned int val[2]; int cpu_num; @@ -312,7 +334,10 @@ static int apply_microcode(int cpu) BUG_ON(cpu_num != cpu); if (mc_intel == NULL) - return 0; + return; + + /* serialize access to the physical write to MSR 0x79 */ + spin_lock_irqsave(µcode_update_lock, flags); /* write microcode via MSR 0x79 */ wrmsr(MSR_IA32_UCODE_WRITE, @@ -326,32 +351,30 @@ static int apply_microcode(int cpu) /* get the current revision from MSR 0x8B */ rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); + spin_unlock_irqrestore(µcode_update_lock, flags); if (val[1] != mc_intel->hdr.rev) { - printk(KERN_ERR "microcode: CPU%d update " - "to revision 0x%x failed\n", - cpu_num, mc_intel->hdr.rev); - return -1; + printk(KERN_ERR "microcode: CPU%d update from revision " + "0x%x to 0x%x failed\n", + cpu_num, uci->cpu_sig.rev, val[1]); + return; } - printk(KERN_INFO "microcode: CPU%d updated to revision " - "0x%x, date = %04x-%02x-%02x \n", - cpu_num, val[1], + printk(KERN_INFO "microcode: CPU%d updated from revision " + "0x%x to 0x%x, date = %04x-%02x-%02x \n", + cpu_num, uci->cpu_sig.rev, val[1], mc_intel->hdr.date & 0xffff, mc_intel->hdr.date >> 24, (mc_intel->hdr.date >> 16) & 0xff); uci->cpu_sig.rev = val[1]; - - return 0; } -static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, - int (*get_ucode_data)(void *, const void *, size_t)) +static int generic_load_microcode(int cpu, void *data, size_t size, + int (*get_ucode_data)(void *, const void *, size_t)) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u8 *ucode_ptr = data, *new_mc = NULL, *mc; int new_rev = uci->cpu_sig.rev; unsigned int leftover = size; - enum ucode_state state = UCODE_OK; while (leftover) { struct microcode_header_intel mc_header; @@ -389,15 +412,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, leftover -= mc_size; } - if (leftover) { - if (new_mc) - vfree(new_mc); - state = UCODE_ERROR; + 
if (!new_mc) goto out; - } - if (!new_mc) { - state = UCODE_NFOUND; + if (leftover) { + vfree(new_mc); goto out; } @@ -408,8 +427,9 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, pr_debug("microcode: CPU%d found a matching microcode update with" " version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); -out: - return state; + + out: + return (int)leftover; } static int get_ucode_fw(void *to, const void *from, size_t n) @@ -418,19 +438,21 @@ static int get_ucode_fw(void *to, const void *from, size_t n) return 0; } -static enum ucode_state request_microcode_fw(int cpu, struct device *device) +static int request_microcode_fw(int cpu, struct device *device) { char name[30]; struct cpuinfo_x86 *c = &cpu_data(cpu); const struct firmware *firmware; - enum ucode_state ret; + int ret; + /* We should bind the task to the CPU */ + BUG_ON(cpu != raw_smp_processor_id()); sprintf(name, "intel-ucode/%02x-%02x-%02x", c->x86, c->x86_model, c->x86_mask); - - if (request_firmware(&firmware, name, device)) { + ret = request_firmware(&firmware, name, device); + if (ret) { pr_debug("microcode: data file %s load failed\n", name); - return UCODE_NFOUND; + return ret; } ret = generic_load_microcode(cpu, (void *)firmware->data, @@ -446,9 +468,11 @@ static int get_ucode_user(void *to, const void *from, size_t n) return copy_from_user(to, from, n); } -static enum ucode_state -request_microcode_user(int cpu, const void __user *buf, size_t size) +static int request_microcode_user(int cpu, const void __user *buf, size_t size) { + /* We should bind the task to the CPU */ + BUG_ON(cpu != raw_smp_processor_id()); + return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); } diff --git a/trunk/arch/x86/kernel/mpparse.c b/trunk/arch/x86/kernel/mpparse.c index 651c93b28862..70fd7e414c15 100644 --- a/trunk/arch/x86/kernel/mpparse.c +++ b/trunk/arch/x86/kernel/mpparse.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -871,17 +870,24 @@ static inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} #endif /* CONFIG_X86_IO_APIC */ -static int -check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count) +static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, + int count) { - int ret = 0; - - if (!mpc_new_phys || count <= mpc_new_length) { - WARN(1, "update_mptable: No spare slots (length: %x)\n", count); - return -1; + if (!mpc_new_phys) { + pr_info("No spare slots, try to append...take your risk, " + "new mpc_length %x\n", count); + } else { + if (count <= mpc_new_length) + pr_info("No spare slots, try to append..., " + "new mpc_length %x\n", count); + else { + pr_err("mpc_new_length %lx is too small\n", + mpc_new_length); + return -1; + } } - return ret; + return 0; } static int __init replace_intsrc_all(struct mpc_table *mpc, @@ -940,7 +946,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } else { struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; count += sizeof(struct mpc_intsrc); - if (check_slot(mpc_new_phys, mpc_new_length, count) < 0) + if (!check_slot(mpc_new_phys, mpc_new_length, count)) goto out; assign_to_mpc_intsrc(&mp_irqs[i], m); mpc->length = count; @@ -957,14 +963,11 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, return 0; } -int enable_update_mptable; +static int __initdata enable_update_mptable; static int __init update_mptable_setup(char *str) { enable_update_mptable = 1; -#ifdef CONFIG_PCI - pci_routeirq = 1; -#endif return 0; } 
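request_microcode_fw() above builds the firmware blob name from the CPU's family, model and stepping with the "intel-ucode/%02x-%02x-%02x" format before handing it to request_firmware(), which resolves it against the firmware search path. A trivial sketch of the naming scheme with sample values (family 6, model 0x1a, stepping 5 are illustrative, not read from real hardware):

#include <stdio.h>

int main(void)
{
	/* The real driver fills these from cpu_data(cpu). */
	unsigned int family = 0x06, model = 0x1a, stepping = 0x05;
	char name[30];

	snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
		 family, model, stepping);
	printf("%s\n", name);   /* -> intel-ucode/06-1a-05 */
	return 0;
}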
early_param("update_mptable", update_mptable_setup); @@ -977,9 +980,6 @@ static int __initdata alloc_mptable; static int __init parse_alloc_mptable_opt(char *p) { enable_update_mptable = 1; -#ifdef CONFIG_PCI - pci_routeirq = 1; -#endif alloc_mptable = 1; if (!p) return 0; diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index e22d63bdc8ff..ca989158e847 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -8,11 +8,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -615,16 +613,3 @@ static int __init idle_setup(char *str) } early_param("idle", idle_setup); -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; -} - diff --git a/trunk/arch/x86/kernel/process_32.c b/trunk/arch/x86/kernel/process_32.c index 56d50b7d71df..76f8f84043a2 100644 --- a/trunk/arch/x86/kernel/process_32.c +++ b/trunk/arch/x86/kernel/process_32.c @@ -9,6 +9,8 @@ * This file handles the architecture-dependent parts of process handling.. */ +#include + #include #include #include @@ -31,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -494,3 +497,15 @@ unsigned long get_wchan(struct task_struct *p) return 0; } +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_int() % 8192; + return sp & ~0xf; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +} diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c index 9d6b20e6cd80..b751a41392b1 100644 --- a/trunk/arch/x86/kernel/process_64.c +++ b/trunk/arch/x86/kernel/process_64.c @@ -14,6 +14,8 @@ * This file handles the architecture-dependent parts of process handling.. */ +#include + #include #include #include @@ -30,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -657,3 +660,15 @@ long sys_arch_prctl(int code, unsigned long addr) return do_arch_prctl(current, code, addr); } +unsigned long arch_align_stack(unsigned long sp) +{ + if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) + sp -= get_random_int() % 8192; + return sp & ~0xf; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? 
: mm->brk; +} diff --git a/trunk/arch/x86/kernel/quirks.c b/trunk/arch/x86/kernel/quirks.c index af71d06624bf..7563b31b4f03 100644 --- a/trunk/arch/x86/kernel/quirks.c +++ b/trunk/arch/x86/kernel/quirks.c @@ -491,42 +491,5 @@ void force_hpet_resume(void) break; } } -#endif - -#if defined(CONFIG_PCI) && defined(CONFIG_NUMA) -/* Set correct numa_node information for AMD NB functions */ -static void __init quirk_amd_nb_node(struct pci_dev *dev) -{ - struct pci_dev *nb_ht; - unsigned int devfn; - u32 val; - - devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); - nb_ht = pci_get_slot(dev->bus, devfn); - if (!nb_ht) - return; - - pci_read_config_dword(nb_ht, 0x60, &val); - set_dev_node(&dev->dev, val & 7); - pci_dev_put(dev); -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, - quirk_amd_nb_node); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, - quirk_amd_nb_node); #endif diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index d1c636bf31a7..b4158439bf63 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -112,14 +112,6 @@ #define ARCH_SETUP #endif -/* - * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. - * The direct mapping extends to max_pfn_mapped, so that we can directly access - * apertures, ACPI and other tables without having to play with fixmaps. 
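The arch_align_stack()/arch_randomize_brk() helpers moved into process_32.c and process_64.c above implement stack and brk randomization: subtract a random offset of up to 8 KiB from the stack pointer, align it down to 16 bytes, and place the brk somewhere in a 32 MiB window. The stack arithmetic is easy to replay in userspace (rand() stands in for get_random_int(), and the starting sp is just a sample value):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Mirror of arch_align_stack(): knock up to 8 KiB off the stack top,
 * then align down to a 16-byte boundary. */
static uint64_t align_stack(uint64_t sp)
{
	sp -= (unsigned int)rand() % 8192;
	return sp & ~0xfULL;
}

int main(void)
{
	uint64_t sp = 0x7fffffffe000ULL;   /* sample initial stack top */

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 4; i++)
		printf("randomized sp: %#llx\n",
		       (unsigned long long)align_stack(sp));
	return 0;
}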
- */ -unsigned long max_low_pfn_mapped; -unsigned long max_pfn_mapped; - RESERVE_BRK(dmi_alloc, 65536); unsigned int boot_cpu_id __read_mostly; @@ -222,8 +214,8 @@ unsigned long mmu_cr4_features; unsigned long mmu_cr4_features = X86_CR4_PAE; #endif -/* Boot loader ID and version as integers, for the benefit of proc_dointvec */ -int bootloader_type, bootloader_version; +/* Boot loader ID as an integer, for the benefit of proc_dointvec */ +int bootloader_type; /* * Setup options @@ -714,12 +706,6 @@ void __init setup_arch(char **cmdline_p) #endif saved_video_mode = boot_params.hdr.vid_mode; bootloader_type = boot_params.hdr.type_of_loader; - if ((bootloader_type >> 4) == 0xe) { - bootloader_type &= 0xf; - bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4; - } - bootloader_version = bootloader_type & 0xf; - bootloader_version |= boot_params.hdr.ext_loader_ver << 4; #ifdef CONFIG_BLK_DEV_RAM rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; @@ -868,16 +854,12 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = max_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; - max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; #endif #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION setup_bios_corruption_check(); #endif - printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", - max_pfn_mapped<arch_pre_intr_init) { + if (x86_quirks->arch_pre_intr_init()) + return; + } + init_ISA_irqs(); +} + /** * x86_quirk_intr_init - post gate setup interrupt initialisation * diff --git a/trunk/arch/x86/kernel/setup_percpu.c b/trunk/arch/x86/kernel/setup_percpu.c index 9c3f0823e6aa..8f0e13be36b3 100644 --- a/trunk/arch/x86/kernel/setup_percpu.c +++ b/trunk/arch/x86/kernel/setup_percpu.c @@ -425,14 +425,6 @@ void __init setup_per_cpu_areas(void) early_per_cpu_ptr(x86_cpu_to_node_map) = NULL; #endif -#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) - /* - * make sure boot cpu node_number is right, when boot cpu is on the - * node that doesn't have mem installed - */ - per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id); -#endif - /* Setup node to cpumask map */ setup_node_to_cpumask_map(); diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index f6db48c405b8..13f33ea8ccaa 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) } struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, - .smp_prepare_cpus = native_smp_prepare_cpus, - .smp_cpus_done = native_smp_cpus_done, + .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, + .smp_prepare_cpus = native_smp_prepare_cpus, + .smp_cpus_done = native_smp_cpus_done, - .smp_send_stop = native_smp_send_stop, - .smp_send_reschedule = native_smp_send_reschedule, + .smp_send_stop = native_smp_send_stop, + .smp_send_reschedule = native_smp_send_reschedule, - .cpu_up = native_cpu_up, - .cpu_die = native_cpu_die, - .cpu_disable = native_cpu_disable, - .play_dead = native_play_dead, + .cpu_up = native_cpu_up, + .cpu_die = native_cpu_die, + .cpu_disable = native_cpu_disable, + .play_dead = native_play_dead, - .send_call_func_ipi = native_send_call_func_ipi, + .send_call_func_ipi = native_send_call_func_ipi, .send_call_func_single_ipi = native_send_call_func_single_ipi, }; EXPORT_SYMBOL_GPL(smp_ops); diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 7c80007ea5f7..58d24ef917d8 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c 
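The hunk removed from setup_arch() above decoded the boot protocol's extended loader fields: a type_of_loader high nibble of 0xe means the real loader ID lives in ext_loader_type, and ext_loader_ver supplies the high bits of the version. That removed logic, replayed on illustrative header byte values rather than a real boot_params structure:

#include <stdio.h>

int main(void)
{
	/* Example header bytes only. */
	unsigned int type_of_loader  = 0xe4;
	unsigned int ext_loader_type = 0x05;
	unsigned int ext_loader_ver  = 0x02;

	unsigned int bootloader_type = type_of_loader;
	unsigned int bootloader_version;

	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (ext_loader_type + 0x10) << 4;
	}
	bootloader_version = bootloader_type & 0xf;
	bootloader_version |= ext_loader_ver << 4;

	printf("loader id=0x%x version=0x%x\n",
	       bootloader_type >> 4, bootloader_version);
	return 0;
}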
@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid) * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this * won't ... remember to clear down the APIC, etc later. */ -int __cpuinit +int __devinit wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) { unsigned long send_status, accept_status = 0; @@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) return (send_status | accept_status); } -static int __cpuinit +int __devinit wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) { unsigned long send_status, accept_status = 0; @@ -822,12 +822,10 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) /* mark "stuck" area as not stuck */ *((volatile unsigned long *)trampoline_base) = 0; - if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { - /* - * Cleanup possible dangling ends... - */ - smpboot_restore_warm_reset_vector(); - } + /* + * Cleanup possible dangling ends... + */ + smpboot_restore_warm_reset_vector(); return boot_error; } @@ -992,12 +990,10 @@ static int __init smp_sanity_check(unsigned max_cpus) */ if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) { - if (!disable_apic) { - pr_err("BIOS bug, local APIC #%d not detected!...\n", - boot_cpu_physical_apicid); - pr_err("... forcing use of dummy APIC emulation." + printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", + boot_cpu_physical_apicid); + printk(KERN_ERR "... forcing use of dummy APIC emulation." "(tell your hw vendor)\n"); - } smpboot_clear_io_apic(); arch_disable_smp_support(); return -1; diff --git a/trunk/arch/x86/kernel/tlb_uv.c b/trunk/arch/x86/kernel/tlb_uv.c index 8c7b03b0cfcb..16f0fd4f18ec 100644 --- a/trunk/arch/x86/kernel/tlb_uv.c +++ b/trunk/arch/x86/kernel/tlb_uv.c @@ -715,7 +715,12 @@ uv_activation_descriptor_init(int node, int pnode) struct bau_desc *adp; struct bau_desc *ad2; - adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node); + /* + * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) + * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade + */ + adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* + UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); BUG_ON(!adp); pa = uv_gpa(adp); /* need the real nasid*/ @@ -729,7 +734,13 @@ uv_activation_descriptor_init(int node, int pnode) (n << UV_DESC_BASE_PNODE_SHIFT | m)); } - for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { + /* + * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each + * cpu even though we only use the first one; one descriptor can + * describe a broadcast to 256 nodes. + */ + for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); + i++, ad2++) { memset(ad2, 0, sizeof(struct bau_desc)); ad2->header.sw_ack_flag = 1; /* @@ -832,7 +843,7 @@ static int __init uv_bau_init(void) return 0; for_each_possible_cpu(cur_cpu) - zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), + alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), GFP_KERNEL, cpu_to_node(cur_cpu)); uv_bau_retry_limit = 1; diff --git a/trunk/arch/x86/kernel/traps.c b/trunk/arch/x86/kernel/traps.c index ede024531f8f..a1d288327ff0 100644 --- a/trunk/arch/x86/kernel/traps.c +++ b/trunk/arch/x86/kernel/traps.c @@ -839,6 +839,9 @@ asmlinkage void math_state_restore(void) } clts(); /* Allow maths ops (or we recurse) */ +#ifdef CONFIG_X86_32 + restore_fpu(tsk); +#else /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. 
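The tlb_uv.c change above replaces a bare 16384 with an expression built from the descriptor size and the per-cpu and per-blade item counts, which the new comment spells out as 64-byte descriptors, 8 items per cpu and up to 32 cpus per blade. A one-line check of that arithmetic (constants mirror the comment and are illustrative, not taken from the UV headers):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const size_t bau_desc_size        = 64;  /* per the comment above */
	const size_t items_per_descriptor = 8;   /* UV_ITEMS_PER_DESCRIPTOR */
	const size_t adp_size             = 32;  /* UV_ADP_SIZE */

	size_t alloc = bau_desc_size * adp_size * items_per_descriptor;

	/* The sized expression reproduces the old magic number and stays
	 * correct if any factor changes. */
	assert(alloc == 16384);
	printf("descriptor page allocation: %zu bytes\n", alloc);
	return 0;
}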
*/ @@ -847,7 +850,7 @@ asmlinkage void math_state_restore(void) force_sig(SIGSEGV, tsk); return; } - +#endif thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ tsk->fpu_counter++; } @@ -966,8 +969,11 @@ void __init trap_init(void) for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) set_bit(i, used_vectors); +#ifdef CONFIG_X86_64 set_bit(IA32_SYSCALL_VECTOR, used_vectors); - +#else + set_bit(SYSCALL_VECTOR, used_vectors); +#endif /* * Should be a barrier for any external CPU state: */ diff --git a/trunk/arch/x86/kernel/tsc.c b/trunk/arch/x86/kernel/tsc.c index 84d27356c3d0..d57de05dc430 100644 --- a/trunk/arch/x86/kernel/tsc.c +++ b/trunk/arch/x86/kernel/tsc.c @@ -384,13 +384,13 @@ unsigned long native_calibrate_tsc(void) { u64 tsc1, tsc2, delta, ref1, ref2; unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; - unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz; + unsigned long flags, latch, ms, fast_calibrate, tsc_khz; int hpet = is_hpet_enabled(), i, loopmin; - hv_tsc_khz = get_hypervisor_tsc_freq(); - if (hv_tsc_khz) { + tsc_khz = get_hypervisor_tsc_freq(); + if (tsc_khz) { printk(KERN_INFO "TSC: Frequency read from the hypervisor\n"); - return hv_tsc_khz; + return tsc_khz; } local_irq_save(flags); diff --git a/trunk/arch/x86/kernel/tsc_sync.c b/trunk/arch/x86/kernel/tsc_sync.c index 027b5b498993..bf36328f6ef9 100644 --- a/trunk/arch/x86/kernel/tsc_sync.c +++ b/trunk/arch/x86/kernel/tsc_sync.c @@ -34,7 +34,6 @@ static __cpuinitdata atomic_t stop_count; * of a critical section, to be able to prove TSC time-warps: */ static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; - static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; static __cpuinitdata int nr_warps; @@ -114,12 +113,13 @@ void __cpuinit check_tsc_sync_source(int cpu) return; if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { - pr_info("Skipping synchronization checks as TSC is reliable.\n"); + printk(KERN_INFO + "Skipping synchronization checks as TSC is reliable.\n"); return; } - pr_info("checking TSC synchronization [CPU#%d -> CPU#%d]:", - smp_processor_id(), cpu); + printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:", + smp_processor_id(), cpu); /* * Reset it - in case this is a second bootup: @@ -143,8 +143,8 @@ void __cpuinit check_tsc_sync_source(int cpu) if (nr_warps) { printk("\n"); - pr_warning("Measured %Ld cycles TSC warp between CPUs, " - "turning off TSC clock.\n", max_warp); + printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs," + " turning off TSC clock.\n", max_warp); mark_tsc_unstable("check_tsc_sync_source failed"); } else { printk(" passed.\n"); @@ -195,3 +195,5 @@ void __cpuinit check_tsc_sync_target(void) while (atomic_read(&stop_count) != cpus) cpu_relax(); } +#undef NR_LOOPS + diff --git a/trunk/arch/x86/kernel/vm86_32.c b/trunk/arch/x86/kernel/vm86_32.c index b8035a0f4048..d7ac84e7fc1c 100644 --- a/trunk/arch/x86/kernel/vm86_32.c +++ b/trunk/arch/x86/kernel/vm86_32.c @@ -318,9 +318,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk } /* - * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL) + * Save old state, set default return value (%ax) to 0 */ - info->regs32->ax = VM86_SIGNAL; + info->regs32->ax = 0; tsk->thread.saved_sp0 = tsk->thread.sp0; tsk->thread.saved_fs = info->regs32->fs; tsk->thread.saved_gs = get_user_gs(info->regs32); diff --git a/trunk/arch/x86/kernel/vmlinux.lds.S b/trunk/arch/x86/kernel/vmlinux.lds.S index 4c85b2e2bb65..849ee611f013 100644 --- 
a/trunk/arch/x86/kernel/vmlinux.lds.S +++ b/trunk/arch/x86/kernel/vmlinux.lds.S @@ -1,431 +1,5 @@ -/* - * ld script for the x86 kernel - * - * Historic 32-bit version written by Martin Mares - * - * Modernisation, unification and other changes and fixes: - * Copyright (C) 2007-2009 Sam Ravnborg - * - * - * Don't define absolute symbols until and unless you know that symbol - * value is should remain constant even if kernel image is relocated - * at run time. Absolute symbols are not relocated. If symbol value should - * change if kernel is relocated, make the symbol section relative and - * put it inside the section definition. - */ - #ifdef CONFIG_X86_32 -#define LOAD_OFFSET __PAGE_OFFSET +# include "vmlinux_32.lds.S" #else -#define LOAD_OFFSET __START_KERNEL_map +# include "vmlinux_64.lds.S" #endif - -#include -#include -#include -#include -#include -#include - -#undef i386 /* in case the preprocessor is a 32bit one */ - -OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) - -#ifdef CONFIG_X86_32 -OUTPUT_ARCH(i386) -ENTRY(phys_startup_32) -jiffies = jiffies_64; -#else -OUTPUT_ARCH(i386:x86-64) -ENTRY(phys_startup_64) -jiffies_64 = jiffies; -#endif - -PHDRS { - text PT_LOAD FLAGS(5); /* R_E */ - data PT_LOAD FLAGS(7); /* RWE */ -#ifdef CONFIG_X86_64 - user PT_LOAD FLAGS(7); /* RWE */ - data.init PT_LOAD FLAGS(7); /* RWE */ -#ifdef CONFIG_SMP - percpu PT_LOAD FLAGS(7); /* RWE */ -#endif - data.init2 PT_LOAD FLAGS(7); /* RWE */ -#endif - note PT_NOTE FLAGS(0); /* ___ */ -} - -SECTIONS -{ -#ifdef CONFIG_X86_32 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; - phys_startup_32 = startup_32 - LOAD_OFFSET; -#else - . = __START_KERNEL; - phys_startup_64 = startup_64 - LOAD_OFFSET; -#endif - - /* Text and read-only data */ - - /* bootstrapping code */ - .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) { - _text = .; - *(.text.head) - } :text = 0x9090 - - /* The rest of the text */ - .text : AT(ADDR(.text) - LOAD_OFFSET) { -#ifdef CONFIG_X86_32 - /* not really needed, already page aligned */ - . = ALIGN(PAGE_SIZE); - *(.text.page_aligned) -#endif - . = ALIGN(8); - _stext = .; - TEXT_TEXT - SCHED_TEXT - LOCK_TEXT - KPROBES_TEXT - IRQENTRY_TEXT - *(.fixup) - *(.gnu.warning) - /* End of text section */ - _etext = .; - } :text = 0x9090 - - NOTES :text :note - - /* Exception table */ - . = ALIGN(16); - __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } :text = 0x9090 - - RODATA - - /* Data */ - . = ALIGN(PAGE_SIZE); - .data : AT(ADDR(.data) - LOAD_OFFSET) { - DATA_DATA - CONSTRUCTORS - -#ifdef CONFIG_X86_64 - /* End of data section */ - _edata = .; -#endif - } :data - -#ifdef CONFIG_X86_32 - /* 32 bit has nosave before _edata */ - . = ALIGN(PAGE_SIZE); - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { - __nosave_begin = .; - *(.data.nosave) - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - } -#endif - - . = ALIGN(PAGE_SIZE); - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { - *(.data.page_aligned) - *(.data.idt) - } - -#ifdef CONFIG_X86_32 - . = ALIGN(32); -#else - . = ALIGN(PAGE_SIZE); - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); -#endif - .data.cacheline_aligned : - AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { - *(.data.cacheline_aligned) - } - - /* rarely changed data like cpu maps */ -#ifdef CONFIG_X86_32 - . = ALIGN(32); -#else - . 
= ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES); -#endif - .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { - *(.data.read_mostly) - -#ifdef CONFIG_X86_32 - /* End of data section */ - _edata = .; -#endif - } - -#ifdef CONFIG_X86_64 - -#define VSYSCALL_ADDR (-10*1024*1024) -#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \ - SIZEOF(.data.read_mostly) + 4095) & ~(4095)) -#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \ - SIZEOF(.data.read_mostly) + 4095) & ~(4095)) - -#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) -#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) - -#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR) -#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) - - . = VSYSCALL_ADDR; - .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { - *(.vsyscall_0) - } :user - - __vsyscall_0 = VSYSCALL_VIRT_ADDR; - - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); - .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { - *(.vsyscall_fn) - } - - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); - .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) { - *(.vsyscall_gtod_data) - } - - vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data); - .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) { - *(.vsyscall_clock) - } - vsyscall_clock = VVIRT(.vsyscall_clock); - - - .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { - *(.vsyscall_1) - } - .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) { - *(.vsyscall_2) - } - - .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { - *(.vgetcpu_mode) - } - vgetcpu_mode = VVIRT(.vgetcpu_mode); - - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); - .jiffies : AT(VLOAD(.jiffies)) { - *(.jiffies) - } - jiffies = VVIRT(.jiffies); - - .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { - *(.vsyscall_3) - } - - . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; - -#undef VSYSCALL_ADDR -#undef VSYSCALL_PHYS_ADDR -#undef VSYSCALL_VIRT_ADDR -#undef VLOAD_OFFSET -#undef VLOAD -#undef VVIRT_OFFSET -#undef VVIRT - -#endif /* CONFIG_X86_64 */ - - /* init_task */ - . = ALIGN(THREAD_SIZE); - .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { - *(.data.init_task) - } -#ifdef CONFIG_X86_64 - :data.init -#endif - - /* - * smp_locks might be freed after init - * start/end must be page aligned - */ - . = ALIGN(PAGE_SIZE); - .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { - __smp_locks = .; - *(.smp_locks) - __smp_locks_end = .; - . = ALIGN(PAGE_SIZE); - } - - /* Init code and data - will be freed after init */ - . = ALIGN(PAGE_SIZE); - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - __init_begin = .; /* paired with __init_end */ - _sinittext = .; - INIT_TEXT - _einittext = .; - } - - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { - INIT_DATA - } - - . = ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - - .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { - __x86_cpu_dev_start = .; - *(.x86_cpu_dev.init) - __x86_cpu_dev_end = .; - } - - SECURITY_INIT - - . = ALIGN(8); - .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { - __parainstructions = .; - *(.parainstructions) - __parainstructions_end = .; - } - - . 
= ALIGN(8); - .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { - __alt_instructions = .; - *(.altinstructions) - __alt_instructions_end = .; - } - - .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { - *(.altinstr_replacement) - } - - /* - * .exit.text is discard at runtime, not link time, to deal with - * references from .altinstructions and .eh_frame - */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { - EXIT_TEXT - } - - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { - EXIT_DATA - } - -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(PAGE_SIZE); - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif - -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) - /* - * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the - * output PHDR, so the next output section - __data_nosave - should - * start another section data.init2. Also, pda should be at the head of - * percpu area. Preallocate it and define the percpu offset symbol - * so that it can be accessed as a percpu variable. - */ - . = ALIGN(PAGE_SIZE); - PERCPU_VADDR(0, :percpu) -#else - PERCPU(PAGE_SIZE) -#endif - - . = ALIGN(PAGE_SIZE); - - /* freed after init ends here */ - .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) { - __init_end = .; - } - -#ifdef CONFIG_X86_64 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { - . = ALIGN(PAGE_SIZE); - __nosave_begin = .; - *(.data.nosave) - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - } :data.init2 - /* use another section data.init2, see PERCPU_VADDR() above */ -#endif - - /* BSS */ - . = ALIGN(PAGE_SIZE); - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { - __bss_start = .; - *(.bss.page_aligned) - *(.bss) - . = ALIGN(4); - __bss_stop = .; - } - - . = ALIGN(PAGE_SIZE); - .brk : AT(ADDR(.brk) - LOAD_OFFSET) { - __brk_base = .; - . += 64 * 1024; /* 64k alignment slop space */ - *(.brk_reservation) /* areas brk users have reserved */ - __brk_limit = .; - } - - .end : AT(ADDR(.end) - LOAD_OFFSET) { - _end = .; - } - - /* Sections to be discarded */ - /DISCARD/ : { - *(.exitcall.exit) - *(.eh_frame) - *(.discard) - } - - STABS_DEBUG - DWARF_DEBUG -} - - -#ifdef CONFIG_X86_32 -ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE") -#else -/* - * Per-cpu symbols which need to be offset from __per_cpu_load - * for the boot processor. - */ -#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load -INIT_PER_CPU(gdt_page); -INIT_PER_CPU(irq_stack_union); - -/* - * Build-time check on the image size: - */ -ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), - "kernel image bigger than KERNEL_IMAGE_SIZE") - -#ifdef CONFIG_SMP -ASSERT((per_cpu__irq_stack_union == 0), - "irq_stack_union is not at start of per-cpu area"); -#endif - -#endif /* CONFIG_X86_32 */ - -#ifdef CONFIG_KEXEC -#include - -ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, - "kexec control code size is too big") -#endif - diff --git a/trunk/arch/x86/kernel/vmlinux_32.lds.S b/trunk/arch/x86/kernel/vmlinux_32.lds.S new file mode 100644 index 000000000000..62ad500d55f3 --- /dev/null +++ b/trunk/arch/x86/kernel/vmlinux_32.lds.S @@ -0,0 +1,229 @@ +/* ld script to make i386 Linux kernel + * Written by Martin Mares ; + * + * Don't define absolute symbols until and unless you know that symbol + * value is should remain constant even if kernel image is relocated + * at run time. Absolute symbols are not relocated. 
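Both linker scripts collect same-purpose input sections between hand-written boundary symbols such as __setup_start/__setup_end and __x86_cpu_dev_start/__x86_cpu_dev_end, so C code can later walk everything the build dropped into those sections. The same idiom works in a plain userspace program: for a section whose name is a valid C identifier, GNU ld synthesizes __start_<name>/__stop_<name> automatically, which the sketch below relies on (section and symbol names are made up for the example):

#include <stdio.h>

struct initcall {
	const char *name;
	int (*fn)(void);
};

/* Drop registrations into a dedicated section; the linker-provided
 * __start_/__stop_ symbols play the role of the explicit boundary
 * symbols in the kernel scripts. */
#define REGISTER(f) \
	static const struct initcall _reg_##f \
	__attribute__((used, section("my_initcalls"))) = { #f, f }

extern const struct initcall __start_my_initcalls[];
extern const struct initcall __stop_my_initcalls[];

static int hello(void) { puts("hello"); return 0; }
static int world(void) { puts("world"); return 0; }

REGISTER(hello);
REGISTER(world);

int main(void)
{
	for (const struct initcall *c = __start_my_initcalls;
	     c < __stop_my_initcalls; c++) {
		printf("running %s\n", c->name);
		c->fn();
	}
	return 0;
}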
If symbol value should + * change if kernel is relocated, make the symbol section relative and + * put it inside the section definition. + */ + +#define LOAD_OFFSET __PAGE_OFFSET + +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_ARCH(i386) +ENTRY(phys_startup_32) +jiffies = jiffies_64; + +PHDRS { + text PT_LOAD FLAGS(5); /* R_E */ + data PT_LOAD FLAGS(7); /* RWE */ + note PT_NOTE FLAGS(0); /* ___ */ +} +SECTIONS +{ + . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; + phys_startup_32 = startup_32 - LOAD_OFFSET; + + .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) { + _text = .; /* Text and read-only data */ + *(.text.head) + } :text = 0x9090 + + /* read-only */ + .text : AT(ADDR(.text) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); /* not really needed, already page aligned */ + *(.text.page_aligned) + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) + _etext = .; /* End of text section */ + } :text = 0x9090 + + NOTES :text :note + + . = ALIGN(16); /* Exception table */ + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } :text = 0x9090 + + RODATA + + /* writeable */ + . = ALIGN(PAGE_SIZE); + .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ + DATA_DATA + CONSTRUCTORS + } :data + + . = ALIGN(PAGE_SIZE); + .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { + __nosave_begin = .; + *(.data.nosave) + . = ALIGN(PAGE_SIZE); + __nosave_end = .; + } + + . = ALIGN(PAGE_SIZE); + .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { + *(.data.page_aligned) + *(.data.idt) + } + + . = ALIGN(32); + .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { + *(.data.cacheline_aligned) + } + + /* rarely changed data like cpu maps */ + . = ALIGN(32); + .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { + *(.data.read_mostly) + _edata = .; /* End of data section */ + } + + . = ALIGN(THREAD_SIZE); /* init_task */ + .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { + *(.data.init_task) + } + + /* might get freed after init */ + . = ALIGN(PAGE_SIZE); + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + __smp_locks = .; + *(.smp_locks) + __smp_locks_end = .; + } + /* will be freed after init + * Following ALIGN() is required to make sure no other data falls on the + * same page where __smp_alt_end is pointing as that page might be freed + * after boot. Always make sure that ALIGN() directive is present after + * the section which contains __smp_alt_end. + */ + . = ALIGN(PAGE_SIZE); + + /* will be freed after init */ + . = ALIGN(PAGE_SIZE); /* Init code and data */ + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { + __init_begin = .; + _sinittext = .; + INIT_TEXT + _einittext = .; + } + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + INIT_DATA + } + . = ALIGN(16); + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { + __x86_cpu_dev_start = .; + *(.x86_cpu_dev.init) + __x86_cpu_dev_end = .; + } + SECURITY_INIT + . 
= ALIGN(4); + .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { + *(.altinstr_replacement) + } + . = ALIGN(4); + .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { + __parainstructions = .; + *(.parainstructions) + __parainstructions_end = .; + } + /* .exit.text is discard at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } +#if defined(CONFIG_BLK_DEV_INITRD) + . = ALIGN(PAGE_SIZE); + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } +#endif + PERCPU(PAGE_SIZE) + . = ALIGN(PAGE_SIZE); + /* freed after init ends here */ + + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { + __init_end = .; + __bss_start = .; /* BSS */ + *(.bss.page_aligned) + *(.bss) + . = ALIGN(4); + __bss_stop = .; + } + + .brk : AT(ADDR(.brk) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + __brk_base = . ; + . += 64 * 1024 ; /* 64k alignment slop space */ + *(.brk_reservation) /* areas brk users have reserved */ + __brk_limit = . ; + } + + .end : AT(ADDR(.end) - LOAD_OFFSET) { + _end = . ; + } + + /* Sections to be discarded */ + /DISCARD/ : { + *(.exitcall.exit) + *(.discard) + } + + STABS_DEBUG + + DWARF_DEBUG +} + +/* + * Build-time check on the image size: + */ +ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE") + +#ifdef CONFIG_KEXEC +/* Link time checks */ +#include + +ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, + "kexec control code size is too big") +#endif diff --git a/trunk/arch/x86/kernel/vmlinux_64.lds.S b/trunk/arch/x86/kernel/vmlinux_64.lds.S new file mode 100644 index 000000000000..c8742507b030 --- /dev/null +++ b/trunk/arch/x86/kernel/vmlinux_64.lds.S @@ -0,0 +1,298 @@ +/* ld script to make x86-64 Linux kernel + * Written by Martin Mares ; + */ + +#define LOAD_OFFSET __START_KERNEL_map + +#include +#include +#include + +#undef i386 /* in case the preprocessor is a 32bit one */ + +OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64") +OUTPUT_ARCH(i386:x86-64) +ENTRY(phys_startup_64) +jiffies_64 = jiffies; +PHDRS { + text PT_LOAD FLAGS(5); /* R_E */ + data PT_LOAD FLAGS(7); /* RWE */ + user PT_LOAD FLAGS(7); /* RWE */ + data.init PT_LOAD FLAGS(7); /* RWE */ +#ifdef CONFIG_SMP + percpu PT_LOAD FLAGS(7); /* RWE */ +#endif + data.init2 PT_LOAD FLAGS(7); /* RWE */ + note PT_NOTE FLAGS(0); /* ___ */ +} +SECTIONS +{ + . = __START_KERNEL; + phys_startup_64 = startup_64 - LOAD_OFFSET; + .text : AT(ADDR(.text) - LOAD_OFFSET) { + _text = .; /* Text and read-only data */ + /* First the code that has to be first for bootstrapping */ + *(.text.head) + _stext = .; + /* Then the rest */ + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) + _etext = .; /* End of text section */ + } :text = 0x9090 + + NOTES :text :note + + . = ALIGN(16); /* Exception table */ + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } :text = 0x9090 + + RODATA + + . 
= ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */ + /* Data */ + .data : AT(ADDR(.data) - LOAD_OFFSET) { + DATA_DATA + CONSTRUCTORS + _edata = .; /* End of data section */ + } :data + + + .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); + *(.data.cacheline_aligned) + } + . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES); + .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { + *(.data.read_mostly) + } + +#define VSYSCALL_ADDR (-10*1024*1024) +#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095)) +#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095)) + +#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) +#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) + +#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR) +#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) + + . = VSYSCALL_ADDR; + .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user + __vsyscall_0 = VSYSCALL_VIRT_ADDR; + + . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); + .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) } + . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); + .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) + { *(.vsyscall_gtod_data) } + vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data); + .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) + { *(.vsyscall_clock) } + vsyscall_clock = VVIRT(.vsyscall_clock); + + + .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) + { *(.vsyscall_1) } + .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) + { *(.vsyscall_2) } + + .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) } + vgetcpu_mode = VVIRT(.vgetcpu_mode); + + . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); + .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) } + jiffies = VVIRT(.jiffies); + + .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) + { *(.vsyscall_3) } + + . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; + +#undef VSYSCALL_ADDR +#undef VSYSCALL_PHYS_ADDR +#undef VSYSCALL_VIRT_ADDR +#undef VLOAD_OFFSET +#undef VLOAD +#undef VVIRT_OFFSET +#undef VVIRT + + .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { + . = ALIGN(THREAD_SIZE); /* init_task */ + *(.data.init_task) + }:data.init + + .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + *(.data.page_aligned) + } + + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + /* might get freed after init */ + . = ALIGN(PAGE_SIZE); + __smp_alt_begin = .; + __smp_locks = .; + *(.smp_locks) + __smp_locks_end = .; + . = ALIGN(PAGE_SIZE); + __smp_alt_end = .; + } + + . = ALIGN(PAGE_SIZE); /* Init code and data */ + __init_begin = .; /* paired with __init_end */ + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { + _sinittext = .; + INIT_TEXT + _einittext = .; + } + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + __initdata_begin = .; + INIT_DATA + __initdata_end = .; + } + + .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { + . 
= ALIGN(16); + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { + __x86_cpu_dev_start = .; + *(.x86_cpu_dev.init) + __x86_cpu_dev_end = .; + } + SECURITY_INIT + + . = ALIGN(8); + .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { + __parainstructions = .; + *(.parainstructions) + __parainstructions_end = .; + } + + .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { + . = ALIGN(8); + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) { + *(.altinstr_replacement) + } + /* .exit.text is discard at runtime, not link time, to deal with references + from .altinstructions and .eh_frame */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } + +#ifdef CONFIG_BLK_DEV_INITRD + . = ALIGN(PAGE_SIZE); + .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + } +#endif + +#ifdef CONFIG_SMP + /* + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the + * output PHDR, so the next output section - __data_nosave - should + * start another section data.init2. Also, pda should be at the head of + * percpu area. Preallocate it and define the percpu offset symbol + * so that it can be accessed as a percpu variable. + */ + . = ALIGN(PAGE_SIZE); + PERCPU_VADDR(0, :percpu) +#else + PERCPU(PAGE_SIZE) +#endif + + . = ALIGN(PAGE_SIZE); + __init_end = .; + + .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + __nosave_begin = .; + *(.data.nosave) + . = ALIGN(PAGE_SIZE); + __nosave_end = .; + } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */ + + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + __bss_start = .; /* BSS */ + *(.bss.page_aligned) + *(.bss) + __bss_stop = .; + } + + .brk : AT(ADDR(.brk) - LOAD_OFFSET) { + . = ALIGN(PAGE_SIZE); + __brk_base = . ; + . += 64 * 1024 ; /* 64k alignment slop space */ + *(.brk_reservation) /* areas brk users have reserved */ + __brk_limit = . ; + } + + _end = . ; + + /* Sections to be discarded */ + /DISCARD/ : { + *(.exitcall.exit) + *(.eh_frame) + *(.discard) + } + + STABS_DEBUG + + DWARF_DEBUG +} + + /* + * Per-cpu symbols which need to be offset from __per_cpu_load + * for the boot processor. 
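The comments above (and the INIT_PER_CPU() definition that follows) rely on per-cpu offsets being zero-based on x86-64 SMP: a per-cpu symbol is just an offset into a section that each CPU gets its own copy of, and the effective address is that CPU's base plus the offset. A minimal userspace sketch of that addressing scheme, with illustrative names only (nothing here is a kernel API):

#include <stdio.h>
#include <stddef.h>

#define NCPUS 4

/* layout of the "per-cpu section"; every CPU owns one private copy */
struct percpu_section {
	int counter;
	long stats[2];
};

static struct percpu_section percpu_area[NCPUS];

/* a per-cpu symbol is really just a zero-based offset into the section */
static const size_t counter_off = offsetof(struct percpu_section, counter);

/* address for a given CPU = that CPU's section base + the zero-based offset */
static int *per_cpu_counter(int cpu)
{
	return (int *)((char *)&percpu_area[cpu] + counter_off);
}

int main(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		*per_cpu_counter(cpu) = cpu * 10;
	for (int cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d counter = %d\n", cpu, *per_cpu_counter(cpu));
	return 0;
}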
+ */ +#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load +INIT_PER_CPU(gdt_page); +INIT_PER_CPU(irq_stack_union); + +/* + * Build-time check on the image size: + */ +ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE") + +#ifdef CONFIG_SMP +ASSERT((per_cpu__irq_stack_union == 0), + "irq_stack_union is not at start of per-cpu area"); +#endif + +#ifdef CONFIG_KEXEC +#include + +ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, + "kexec control code size is too big") +#endif diff --git a/trunk/arch/x86/lguest/Makefile b/trunk/arch/x86/lguest/Makefile index 94e0e54056a9..27f0c9ed7f60 100644 --- a/trunk/arch/x86/lguest/Makefile +++ b/trunk/arch/x86/lguest/Makefile @@ -1,2 +1 @@ obj-y := i386_head.o boot.o -CFLAGS_boot.o := $(call cc-option, -fno-stack-protector) diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c index ef4205c1a7a5..ca7ec44bafc3 100644 --- a/trunk/arch/x86/lguest/boot.c +++ b/trunk/arch/x86/lguest/boot.c @@ -67,7 +67,6 @@ #include #include #include -#include #include /* for struct machine_ops */ /*G:010 Welcome to the Guest! @@ -637,7 +636,7 @@ static void __init lguest_init_IRQ(void) void lguest_setup_irq(unsigned int irq) { - irq_to_desc_alloc_node(irq, 0); + irq_to_desc_alloc_cpu(irq, 0); set_irq_chip_and_handler_name(irq, &lguest_irq_controller, handle_level_irq, "level"); } @@ -1089,21 +1088,13 @@ __init void lguest_init(void) * lguest_init() where the rest of the fairly chaotic boot setup * occurs. */ - /* The stack protector is a weird thing where gcc places a canary - * value on the stack and then checks it on return. This file is - * compiled with -fno-stack-protector it, so we got this far without - * problems. The value of the canary is kept at offset 20 from the - * %gs register, so we need to set that up before calling C functions - * in other files. */ - setup_stack_canary_segment(0); - /* We could just call load_stack_canary_segment(), but we might as - * call switch_to_new_gdt() which loads the whole table and sets up - * the per-cpu segment descriptor register %fs as well. */ - switch_to_new_gdt(0); - /* As described in head_32.S, we map the first 128M of memory. */ max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; + /* Load the %fs segment register (the per-cpu segment register) with + * the normal data segment to get through booting. 
*/ + asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); + /* The Host<->Guest Switcher lives at the top of our address space, and * the Host told us how big it is when we made LGUEST_INIT hypercall: * it put the answer in lguest_data.reserve_mem */ diff --git a/trunk/arch/x86/mm/dump_pagetables.c b/trunk/arch/x86/mm/dump_pagetables.c index a725b7f760ae..e7277cbcfb40 100644 --- a/trunk/arch/x86/mm/dump_pagetables.c +++ b/trunk/arch/x86/mm/dump_pagetables.c @@ -161,14 +161,13 @@ static void note_page(struct seq_file *m, struct pg_state *st, st->current_address >= st->marker[1].start_address) { const char *unit = units; unsigned long delta; - int width = sizeof(unsigned long) * 2; /* * Now print the actual finished series */ - seq_printf(m, "0x%0*lx-0x%0*lx ", - width, st->start_address, - width, st->current_address); + seq_printf(m, "0x%p-0x%p ", + (void *)st->start_address, + (void *)st->current_address); delta = (st->current_address - st->start_address) >> 10; while (!(delta & 1023) && unit[1]) { diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c index b9ca6d767dbb..a03b7279efa0 100644 --- a/trunk/arch/x86/mm/fault.c +++ b/trunk/arch/x86/mm/fault.c @@ -3,16 +3,40 @@ * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar */ -#include /* STACK_END_MAGIC */ -#include /* test_thread_flag(), ... */ -#include /* oops_begin/end, ... */ -#include /* search_exception_table */ -#include /* max_low_pfn */ -#include /* __kprobes, ... */ -#include /* kmmio_handler, ... */ - -#include /* dotraplinkage, ... */ -#include /* pgd_*(), ... */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include /* * Page fault error code bits: @@ -514,6 +538,8 @@ static void dump_pagetable(unsigned long address) static int is_errata93(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 + static int once; + if (address != regs->ip) return 0; @@ -523,7 +549,10 @@ static int is_errata93(struct pt_regs *regs, unsigned long address) address |= 0xffffffffUL << 32; if ((address >= (u64)_stext && address <= (u64)_etext) || (address >= MODULES_VADDR && address <= MODULES_END)) { - printk_once(errata93_warning); + if (!once) { + printk(errata93_warning); + once = 1; + } regs->ip = address; return 1; } diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index 34c1bfb64f1c..ae4f7b5d7104 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -1,4 +1,3 @@ -#include #include #include @@ -11,9 +10,6 @@ #include #include #include -#include - -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); unsigned long __initdata e820_table_start; unsigned long __meminitdata e820_table_end; @@ -27,69 +23,6 @@ int direct_gbpages #endif ; -int nx_enabled; - -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) -static int disable_nx __cpuinitdata; - -/* - * noexec = on|off - * - * Control non-executable mappings for processes. 
- * - * on Enable - * off Disable - */ -static int __init noexec_setup(char *str) -{ - if (!str) - return -EINVAL; - if (!strncmp(str, "on", 2)) { - __supported_pte_mask |= _PAGE_NX; - disable_nx = 0; - } else if (!strncmp(str, "off", 3)) { - disable_nx = 1; - __supported_pte_mask &= ~_PAGE_NX; - } - return 0; -} -early_param("noexec", noexec_setup); -#endif - -#ifdef CONFIG_X86_PAE -static void __init set_nx(void) -{ - unsigned int v[4], l, h; - - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) { - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]); - - if ((v[3] & (1 << 20)) && !disable_nx) { - rdmsr(MSR_EFER, l, h); - l |= EFER_NX; - wrmsr(MSR_EFER, l, h); - nx_enabled = 1; - __supported_pte_mask |= _PAGE_NX; - } - } -} -#else -static inline void set_nx(void) -{ -} -#endif - -#ifdef CONFIG_X86_64 -void __cpuinit check_efer(void) -{ - unsigned long efer; - - rdmsrl(MSR_EFER, efer); - if (!(efer & EFER_NX) || disable_nx) - __supported_pte_mask &= ~_PAGE_NX; -} -#endif - static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { @@ -133,11 +66,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse, */ #ifdef CONFIG_X86_32 start = 0x7000; -#else - start = 0x8000; -#endif e820_table_start = find_e820_area(start, max_pfn_mapped< #include #include -#include #include +unsigned long max_low_pfn_mapped; +unsigned long max_pfn_mapped; + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); @@ -584,9 +587,61 @@ void zap_low_mappings(void) flush_tlb_all(); } +int nx_enabled; + pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); +#ifdef CONFIG_X86_PAE + +static int disable_nx __initdata; + +/* + * noexec = on|off + * + * Control non executable mappings. 
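The set_nx() routine in these hunks probes CPUID leaf 0x80000001 and only sets EFER.NX when EDX bit 20 reports NX support. The detection half can be reproduced from userspace; a small sketch assuming an x86 build with GCC/Clang's <cpuid.h> (EFER itself is only writable in kernel mode):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* extended leaf 0x80000001: EDX bit 20 advertises NX (no-execute) */
	if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) && (edx & (1u << 20)))
		printf("CPU reports NX support\n");
	else
		printf("CPU does not report NX\n");
	return 0;
}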
+ * + * on Enable + * off Disable + */ +static int __init noexec_setup(char *str) +{ + if (!str || !strcmp(str, "on")) { + if (cpu_has_nx) { + __supported_pte_mask |= _PAGE_NX; + disable_nx = 0; + } + } else { + if (!strcmp(str, "off")) { + disable_nx = 1; + __supported_pte_mask &= ~_PAGE_NX; + } else { + return -EINVAL; + } + } + + return 0; +} +early_param("noexec", noexec_setup); + +void __init set_nx(void) +{ + unsigned int v[4], l, h; + + if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) { + cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]); + + if ((v[3] & (1 << 20)) && !disable_nx) { + rdmsr(MSR_EFER, l, h); + l |= EFER_NX; + wrmsr(MSR_EFER, l, h); + nx_enabled = 1; + __supported_pte_mask |= _PAGE_NX; + } + } +} +#endif + /* user-defined highmem size */ static unsigned int highmem_pages = -1; @@ -706,15 +761,15 @@ void __init initmem_init(unsigned long start_pfn, highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; + memory_present(0, 0, highend_pfn); e820_register_active_regions(0, 0, highend_pfn); - sparse_memory_present_with_active_regions(0); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else + memory_present(0, 0, max_low_pfn); e820_register_active_regions(0, 0, max_low_pfn); - sparse_memory_present_with_active_regions(0); num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif diff --git a/trunk/arch/x86/mm/init_64.c b/trunk/arch/x86/mm/init_64.c index 52bb9519bb86..1753e8020df6 100644 --- a/trunk/arch/x86/mm/init_64.c +++ b/trunk/arch/x86/mm/init_64.c @@ -50,8 +50,18 @@ #include #include +/* + * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. + * The direct mapping extends to max_pfn_mapped, so that we can directly access + * apertures, ACPI and other tables without having to play with fixmaps. + */ +unsigned long max_low_pfn_mapped; +unsigned long max_pfn_mapped; + static unsigned long dma_reserve __initdata; +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + static int __init parse_direct_gbpages_off(char *arg) { direct_gbpages = 0; @@ -75,6 +85,39 @@ early_param("gbpages", parse_direct_gbpages_on); pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; EXPORT_SYMBOL_GPL(__supported_pte_mask); +static int disable_nx __cpuinitdata; + +/* + * noexec=on|off + * Control non-executable mappings for 64-bit processes. 
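Both noexec_setup() variants in this patch, like nonx_setup() for 64-bit below, accept the same on|off syntax through early_param(). The string handling reduces to a couple of strncmp() calls; a standalone sketch of just that parsing, with a stand-in flag instead of __supported_pte_mask (names are illustrative):

#include <stdio.h>
#include <string.h>

static int nx_requested = 1;	/* default: use NX where the CPU supports it */

/* same on|off parsing shape as the noexec= early parameter handlers */
static int parse_noexec(const char *str)
{
	if (!str)
		return -1;
	if (!strncmp(str, "on", 2)) {
		nx_requested = 1;
		return 0;
	}
	if (!strncmp(str, "off", 3)) {
		nx_requested = 0;
		return 0;
	}
	return -1;
}

int main(void)
{
	const char *samples[] = { "on", "off", "bogus" };
	for (int i = 0; i < 3; i++)
		printf("noexec=%s -> rc=%d, nx_requested=%d\n",
		       samples[i], parse_noexec(samples[i]), nx_requested);
	return 0;
}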
+ * + * on Enable (default) + * off Disable + */ +static int __init nonx_setup(char *str) +{ + if (!str) + return -EINVAL; + if (!strncmp(str, "on", 2)) { + __supported_pte_mask |= _PAGE_NX; + disable_nx = 0; + } else if (!strncmp(str, "off", 3)) { + disable_nx = 1; + __supported_pte_mask &= ~_PAGE_NX; + } + return 0; +} +early_param("noexec", nonx_setup); + +void __cpuinit check_efer(void) +{ + unsigned long efer; + + rdmsrl(MSR_EFER, efer); + if (!(efer & EFER_NX) || disable_nx) + __supported_pte_mask &= ~_PAGE_NX; +} + int force_personality32; /* @@ -585,7 +628,6 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn) early_res_to_bootmem(0, end_pfn<> PAGE_SHIFT; unsigned long e_pfn = end >> PAGE_SHIFT; - int changed = 0; + int ret = 0, changed = 0; struct bootnode *nd = &nodes_add[node]; /* I had some trouble with strange memory hotadd regions breaking @@ -197,7 +210,7 @@ update_nodes_add(int node, unsigned long start, unsigned long end) mistakes */ if ((signed long)(end - start) < NODE_MIN_SIZE) { printk(KERN_ERR "SRAT: Hotplug area too small\n"); - return; + return -1; } /* This check might be a bit too strict, but I'm keeping it for now. */ @@ -205,7 +218,12 @@ update_nodes_add(int node, unsigned long start, unsigned long end) printk(KERN_ERR "SRAT: Hotplug area %lu -> %lu has existing memory\n", s_pfn, e_pfn); - return; + return -1; + } + + if (!hotadd_enough_memory(&nodes_add[node])) { + printk(KERN_ERR "SRAT: Hotplug area too large\n"); + return -1; } /* Looks good */ @@ -227,9 +245,11 @@ update_nodes_add(int node, unsigned long start, unsigned long end) printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); } + ret = update_end_of_memory(nd->end); + if (changed) - printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", - nd->start, nd->end); + printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end); + return ret; } /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ @@ -290,10 +310,13 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) start, end); e820_register_active_regions(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT); + push_node_boundaries(node, nd->start >> PAGE_SHIFT, + nd->end >> PAGE_SHIFT); - if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) { - update_nodes_add(node, start, end); - /* restore nodes[node] */ + if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && + (reserve_hotadd(node, start, end) < 0)) { + /* Ignore hotadd region. Undo damage */ + printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); *nd = oldnode; if ((nd->start | nd->end) == 0) node_clear(node, nodes_parsed); @@ -322,9 +345,9 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) pxmram = 0; } - e820ram = max_pfn - (e820_hole_size(0, max_pfn<>PAGE_SHIFT); - /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ - if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { + e820ram = max_pfn - absent_pages_in_range(0, max_pfn); + /* We seem to lose 3 pages somewhere. Allow a bit of slack. */ + if ((long)(e820ram - pxmram) >= 1*1024*1024) { printk(KERN_ERR "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. 
Not used.\n", (pxmram << PAGE_SHIFT) >> 20, @@ -334,6 +357,17 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) return 1; } +static void __init unparse_node(int node) +{ + int i; + node_clear(node, nodes_parsed); + node_clear(node, cpu_nodes_parsed); + for (i = 0; i < MAX_LOCAL_APIC; i++) { + if (apicid_to_node[i] == node) + apicid_to_node[i] = NUMA_NO_NODE; + } +} + void __init acpi_numa_arch_fixup(void) {} /* Use the information discovered above to actually set up the nodes. */ @@ -345,8 +379,18 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) return -1; /* First clean up the node list */ - for (i = 0; i < MAX_NUMNODES; i++) + for (i = 0; i < MAX_NUMNODES; i++) { cutoff_node(i, start, end); + /* + * don't confuse VM with a node that doesn't have the + * minimum memory. + */ + if (nodes[i].end && + (nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) { + unparse_node(i); + node_set_offline(i); + } + } if (!nodes_cover_memory(nodes)) { bad_srat(); @@ -379,7 +423,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) if (node == NUMA_NO_NODE) continue; - if (!node_online(node)) + if (!node_isset(node, node_possible_map)) numa_clear_node(i); } numa_init_array(); @@ -466,6 +510,26 @@ static int null_slit_node_compare(int a, int b) } #endif /* CONFIG_NUMA_EMU */ +void __init srat_reserve_add_area(int nodeid) +{ + if (found_add_area && nodes_add[nodeid].end) { + u64 total_mb; + + printk(KERN_INFO "SRAT: Reserving hot-add memory space " + "for node %d at %Lx-%Lx\n", + nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end); + total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start) + >> PAGE_SHIFT; + total_mb *= sizeof(struct page); + total_mb >>= 20; + printk(KERN_INFO "SRAT: This will cost you %Lu MB of " + "pre-allocated memory.\n", (unsigned long long)total_mb); + reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start, + nodes_add[nodeid].end - nodes_add[nodeid].start, + BOOTMEM_DEFAULT); + } +} + int __node_distance(int a, int b) { int index; diff --git a/trunk/arch/x86/pci/irq.c b/trunk/arch/x86/pci/irq.c index 0696d506c4ad..fecbce6e7d7c 100644 --- a/trunk/arch/x86/pci/irq.c +++ b/trunk/arch/x86/pci/irq.c @@ -889,9 +889,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) return 0; } - if (io_apic_assign_pci_irqs) - return 0; - /* Find IRQ routing entry */ if (!pirq_table) @@ -1042,15 +1039,56 @@ static void __init pcibios_fixup_irqs(void) pirq_penalty[dev->irq]++; } - if (io_apic_assign_pci_irqs) - return; - dev = NULL; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; +#ifdef CONFIG_X86_IO_APIC + /* + * Recalculate IRQ numbers if we use the I/O APIC. + */ + if (io_apic_assign_pci_irqs) { + int irq; + + /* + * interrupt pins are numbered starting from 1 + */ + irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, + PCI_SLOT(dev->devfn), pin - 1); + /* + * Busses behind bridges are typically not listed in the + * MP-table. In this case we have to look up the IRQ + * based on the parent bus, parent slot, and pin number. + * The SMP code detects such bridged busses itself so we + * should get into this branch reliably. 
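When the device sits behind a PCI-to-PCI bridge, the lookup above swizzles the interrupt pin and retries against the bridge's slot. The conventional bridge swizzle maps a (slot, pin) pair on the secondary bus to ((pin - 1 + slot) % 4) + 1 on the primary side, which is what the pci_swizzle_interrupt_pin() call is expected to apply; a tiny standalone illustration:

#include <stdio.h>

/* pins are encoded 1..4 (INTA..INTD), as in the PCI Interrupt Pin register */
static int swizzle_pin(int device_slot, int pin)
{
	return ((pin - 1 + device_slot) % 4) + 1;
}

int main(void)
{
	/* device in slot 3 behind a bridge, wired to INTB on its own bus */
	int bridge_pin = swizzle_pin(3, 2);
	printf("bridge side sees INT%c\n", 'A' + bridge_pin - 1);
	return 0;
}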
+ */ + if (irq < 0 && dev->bus->parent) { + /* go back to the bridge */ + struct pci_dev *bridge = dev->bus->self; + int bus; + + pin = pci_swizzle_interrupt_pin(dev, pin); + bus = bridge->bus->number; + irq = IO_APIC_get_PCI_irq_vector(bus, + PCI_SLOT(bridge->devfn), pin - 1); + if (irq >= 0) + dev_warn(&dev->dev, + "using bridge %s INT %c to " + "get IRQ %d\n", + pci_name(bridge), + 'A' + pin - 1, irq); + } + if (irq >= 0) { + dev_info(&dev->dev, + "PCI->APIC IRQ transform: INT %c " + "-> IRQ %d\n", + 'A' + pin - 1, irq); + dev->irq = irq; + } + } +#endif /* * Still no IRQ? Try to lookup one... */ @@ -1145,19 +1183,6 @@ int __init pcibios_irq_init(void) pcibios_enable_irq = pirq_enable_irq; pcibios_fixup_irqs(); - - if (io_apic_assign_pci_irqs && pci_routeirq) { - struct pci_dev *dev = NULL; - /* - * PCI IRQ routing is set up by pci_enable_device(), but we - * also do it here in case there are still broken drivers that - * don't use pci_enable_device(). - */ - printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); - for_each_pci_dev(dev) - pirq_enable_irq(dev); - } - return 0; } @@ -1188,23 +1213,16 @@ void pcibios_penalize_isa_irq(int irq, int active) static int pirq_enable_irq(struct pci_dev *dev) { u8 pin; + struct pci_dev *temp_dev; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); - if (pin && !pcibios_lookup_irq(dev, 1)) { + if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { char *msg = ""; - if (!io_apic_assign_pci_irqs && dev->irq) - return 0; - if (io_apic_assign_pci_irqs) { -#ifdef CONFIG_X86_IO_APIC - struct pci_dev *temp_dev; int irq; - struct io_apic_irq_attr irq_attr; - irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, - PCI_SLOT(dev->devfn), - pin - 1, &irq_attr); + irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1); /* * Busses behind bridges are typically not listed in the MP-table. 
* In this case we have to look up the IRQ based on the parent bus, @@ -1217,8 +1235,7 @@ static int pirq_enable_irq(struct pci_dev *dev) pin = pci_swizzle_interrupt_pin(dev, pin); irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, - PCI_SLOT(bridge->devfn), - pin - 1, &irq_attr); + PCI_SLOT(bridge->devfn), pin - 1); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", @@ -1228,15 +1245,12 @@ static int pirq_enable_irq(struct pci_dev *dev) } dev = temp_dev; if (irq >= 0) { - io_apic_set_pci_routing(&dev->dev, irq, - &irq_attr); - dev->irq = irq; dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); + dev->irq = irq; return 0; } else msg = "; probably buggy MP table"; -#endif } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else diff --git a/trunk/arch/x86/pci/mmconfig-shared.c b/trunk/arch/x86/pci/mmconfig-shared.c index 8766b0e216c5..5fa10bb9604f 100644 --- a/trunk/arch/x86/pci/mmconfig-shared.c +++ b/trunk/arch/x86/pci/mmconfig-shared.c @@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, if (!fixmem32) return AE_OK; if ((mcfg_res->start >= fixmem32->address) && - (mcfg_res->end < (fixmem32->address + + (mcfg_res->end <= (fixmem32->address + fixmem32->address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; @@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, return AE_OK; if ((mcfg_res->start >= address.minimum) && - (mcfg_res->end < (address.minimum + address.address_length))) { + (mcfg_res->end <= (address.minimum + address.address_length))) { mcfg_res->flags = 1; return AE_CTRL_TERMINATE; } @@ -418,7 +418,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used) struct resource mcfg_res; mcfg_res.start = start; - mcfg_res.end = end - 1; + mcfg_res.end = end; mcfg_res.flags = 0; acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL); diff --git a/trunk/arch/x86/vdso/vma.c b/trunk/arch/x86/vdso/vma.c index cac083386e03..7133cdf9098b 100644 --- a/trunk/arch/x86/vdso/vma.c +++ b/trunk/arch/x86/vdso/vma.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/block/bsg.c b/trunk/block/bsg.c index dd81be455e00..206060e795da 100644 --- a/trunk/block/bsg.c +++ b/trunk/block/bsg.c @@ -315,7 +315,6 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, blk_put_request(rq); if (next_rq) { blk_rq_unmap_user(next_rq->bio); - next_rq->bio = NULL; blk_put_request(next_rq); } return ERR_PTR(ret); @@ -449,7 +448,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, hdr->dout_resid = rq->data_len; hdr->din_resid = rq->next_rq->data_len; blk_rq_unmap_user(bidi_bio); - rq->next_rq->bio = NULL; blk_put_request(rq->next_rq); } else if (rq_data_dir(rq) == READ) hdr->din_resid = rq->data_len; @@ -468,7 +466,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, blk_rq_unmap_user(bio); if (rq->cmd != rq->__cmd) kfree(rq->cmd); - rq->bio = NULL; blk_put_request(rq); return ret; diff --git a/trunk/crypto/ahash.c b/trunk/crypto/ahash.c index f3476374f764..b2d1ee32cfe8 100644 --- a/trunk/crypto/ahash.c +++ b/trunk/crypto/ahash.c @@ -82,11 +82,10 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (err) return err; - if (nbytes) { - walk->offset = 0; - walk->pg++; + walk->offset = 0; + + if (nbytes) return hash_walk_next(walk); - } if (!walk->total) return 0; diff --git 
a/trunk/drivers/acpi/pci_irq.c b/trunk/drivers/acpi/pci_irq.c index 2faa9e2ac893..51b9f8280f88 100644 --- a/trunk/drivers/acpi/pci_irq.c +++ b/trunk/drivers/acpi/pci_irq.c @@ -401,8 +401,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF)) { printk(" - using IRQ %d\n", dev->irq); - acpi_register_gsi(&dev->dev, dev->irq, - ACPI_LEVEL_SENSITIVE, + acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); return 0; } else { @@ -411,7 +410,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev) } } - rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); + rc = acpi_register_gsi(gsi, triggering, polarity); if (rc < 0) { dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n", pin_name(pin)); diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c index 23f0fb84f1c1..45ad3288c5ff 100644 --- a/trunk/drivers/acpi/processor_core.c +++ b/trunk/drivers/acpi/processor_core.c @@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device) if (!pr) return -ENOMEM; - if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { + if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { kfree(pr); return -ENOMEM; } diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 6b91c26a4635..08186ecbaf8d 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -220,7 +220,6 @@ enum { AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ - AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ /* ap->flags bits */ @@ -2317,17 +2316,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); - struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; u32 ctl; - if (mesg.event & PM_EVENT_SUSPEND && - hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { - dev_printk(KERN_ERR, &pdev->dev, - "BIOS update required for suspend/resume\n"); - return -EIO; - } - if (mesg.event & PM_EVENT_SLEEP) { /* AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a @@ -2619,63 +2610,6 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev) return false; } -static bool ahci_broken_suspend(struct pci_dev *pdev) -{ - static const struct dmi_system_id sysids[] = { - /* - * On HP dv[4-6] and HDX18 with earlier BIOSen, link - * to the harddisk doesn't become online after - * resuming from STR. Warn and fail suspend. 
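The ahci_broken_suspend() code removed here keys each DMI entry on a product match plus a cutoff BIOS version, and flags the system when the reported version string compares lower. A rough userspace sketch of that pattern, borrowing one entry from the table and using the same lexicographic strcmp() comparison (which works because HP's "F.xx" version strings sort that way):

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *product;
	const char *fixed_in;	/* first BIOS version known to be good */
};

static const struct quirk quirks[] = {
	{ "HP Pavilion dv5 Notebook PC", "F.16" },
	{ NULL, NULL }
};

static int suspend_is_broken(const char *product, const char *bios_ver)
{
	const struct quirk *q;

	for (q = quirks; q->product; q++)
		if (!strcmp(product, q->product))
			return !bios_ver || strcmp(bios_ver, q->fixed_in) < 0;
	return 0;
}

int main(void)
{
	printf("%d\n", suspend_is_broken("HP Pavilion dv5 Notebook PC", "F.12"));
	printf("%d\n", suspend_is_broken("HP Pavilion dv5 Notebook PC", "F.20"));
	return 0;
}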
- */ - { - .ident = "dv4", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, - "HP Pavilion dv4 Notebook PC"), - }, - .driver_data = "F.30", /* cutoff BIOS version */ - }, - { - .ident = "dv5", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, - "HP Pavilion dv5 Notebook PC"), - }, - .driver_data = "F.16", /* cutoff BIOS version */ - }, - { - .ident = "dv6", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, - "HP Pavilion dv6 Notebook PC"), - }, - .driver_data = "F.21", /* cutoff BIOS version */ - }, - { - .ident = "HDX18", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, - "HP HDX18 Notebook PC"), - }, - .driver_data = "F.23", /* cutoff BIOS version */ - }, - { } /* terminate list */ - }; - const struct dmi_system_id *dmi = dmi_first_match(sysids); - const char *ver; - - if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) - return false; - - ver = dmi_get_system_info(DMI_BIOS_VERSION); - - return !ver || strcmp(ver, dmi->driver_data) < 0; -} - static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; @@ -2781,12 +2715,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } - if (ahci_broken_suspend(pdev)) { - hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; - dev_printk(KERN_WARNING, &pdev->dev, - "BIOS update required for suspend/resume\n"); - } - /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 1aeb7082b0c4..d51a17c0f59b 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -1455,15 +1455,6 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev) /* PCI slot number of the controller */ .driver_data = (void *)0x1FUL, }, - { - .ident = "HP Compaq nc6000", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"), - }, - /* PCI slot number of the controller */ - .driver_data = (void *)0x1FUL, - }, { } /* terminate list */ }; diff --git a/trunk/drivers/ata/pata_ali.c b/trunk/drivers/ata/pata_ali.c index fc9c5d6d7d80..751b7ea4816c 100644 --- a/trunk/drivers/ata/pata_ali.c +++ b/trunk/drivers/ata/pata_ali.c @@ -497,16 +497,14 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0x20 added DMA */ static const struct ata_port_info info_20 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | - ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0x20 with support logic added UDMA */ static const struct ata_port_info info_20_udma = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | - ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, @@ -514,8 +512,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC2 adds UDMA66 */ static const struct ata_port_info info_c2 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | - ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 
.pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, @@ -523,8 +520,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC3 is UDMA66 for now */ static const struct ata_port_info info_c3 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | - ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, @@ -532,8 +528,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC4 is UDMA100 */ static const struct ata_port_info info_c4 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | - ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -541,7 +536,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; /* Revision 0xC5 is UDMA133 with LBA48 DMA */ static const struct ata_port_info info_c5 = { - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, diff --git a/trunk/drivers/ata/pata_efar.c b/trunk/drivers/ata/pata_efar.c index 2a6412f5d117..2085e0a3a05a 100644 --- a/trunk/drivers/ata/pata_efar.c +++ b/trunk/drivers/ata/pata_efar.c @@ -22,7 +22,7 @@ #include #define DRV_NAME "pata_efar" -#define DRV_VERSION "0.4.5" +#define DRV_VERSION "0.4.4" /** * efar_pre_reset - Enable bits @@ -98,17 +98,18 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) { 2, 1 }, { 2, 3 }, }; - if (pio > 1) - control |= 1; /* TIME */ + if (pio > 2) + control |= 1; /* TIME1 enable */ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ - control |= 2; /* IE */ - /* Intel specifies that the prefetch/posting is for disk only */ + control |= 2; /* IE enable */ + /* Intel specifies that the PPE functionality is for disk only */ if (adev->class == ATA_DEV_ATA) - control |= 4; /* PPE */ + control |= 4; /* PPE enable */ pci_read_config_word(dev, idetm_port, &idetm_data); - /* Set PPE, IE, and TIME as appropriate */ + /* Enable PPE, IE and TIME as appropriate */ + if (adev->devno == 0) { idetm_data &= 0xCCF0; idetm_data |= control; @@ -128,7 +129,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev) pci_write_config_byte(dev, 0x44, slave_data); } - idetm_data |= 0x4000; /* Ensure SITRE is set */ + idetm_data |= 0x4000; /* Ensure SITRE is enabled */ pci_write_config_word(dev, idetm_port, idetm_data); } diff --git a/trunk/drivers/ata/pata_legacy.c b/trunk/drivers/ata/pata_legacy.c index 6932e56d179c..f72c6c5b820f 100644 --- a/trunk/drivers/ata/pata_legacy.c +++ b/trunk/drivers/ata/pata_legacy.c @@ -48,7 +48,6 @@ * */ -#include #include #include #include @@ -1029,7 +1028,6 @@ static __init int legacy_init_one(struct legacy_probe *probe) &legacy_sht); if (ret) goto fail; - async_synchronize_full(); ld->platform_dev = pdev; /* Nothing found means we drop the port as its probably not there */ diff --git a/trunk/drivers/ata/pata_netcell.c b/trunk/drivers/ata/pata_netcell.c index f0d52f72f5bb..bdb236957cb9 100644 --- a/trunk/drivers/ata/pata_netcell.c +++ b/trunk/drivers/ata/pata_netcell.c @@ -20,24 +20,13 @@ /* No PIO or DMA methods needed for this device */ -static unsigned int netcell_read_id(struct ata_device *adev, - struct ata_taskfile *tf, u16 *id) -{ - unsigned int err_mask = ata_do_dev_read_id(adev, tf, id); - /* Firmware forgets to mark 
words 85-87 valid */ - if (err_mask == 0) - id[ATA_ID_CSF_DEFAULT] |= 0x4000; - return err_mask; -} - static struct scsi_host_template netcell_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations netcell_ops = { .inherits = &ata_bmdma_port_ops, - .cable_detect = ata_cable_80wire, - .read_id = netcell_read_id, + .cable_detect = ata_cable_80wire, }; diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c index 4a9f3492b921..340ba4f9dc54 100644 --- a/trunk/drivers/char/hpet.c +++ b/trunk/drivers/char/hpet.c @@ -224,7 +224,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp) break; } - gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE, + gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); if (gsi > 0) break; @@ -939,7 +939,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) irqp = &res->data.extended_irq; for (i = 0; i < irqp->interrupt_count; i++) { - irq = acpi_register_gsi(NULL, irqp->interrupts[i], + irq = acpi_register_gsi(irqp->interrupts[i], irqp->triggering, irqp->polarity); if (irq < 0) return AE_ERROR; diff --git a/trunk/drivers/char/mem.c b/trunk/drivers/char/mem.c index f96d0bef855e..8f05c38c2f06 100644 --- a/trunk/drivers/char/mem.c +++ b/trunk/drivers/char/mem.c @@ -694,8 +694,6 @@ static ssize_t read_zero(struct file * file, char __user * buf, written += chunk - unwritten; if (unwritten) break; - if (signal_pending(current)) - return written ? written : -ERESTARTSYS; buf += chunk; count -= chunk; cond_resched(); diff --git a/trunk/drivers/char/mxser.c b/trunk/drivers/char/mxser.c index 13f8871e5b21..a420e8d437dd 100644 --- a/trunk/drivers/char/mxser.c +++ b/trunk/drivers/char/mxser.c @@ -2711,7 +2711,7 @@ static int __init mxser_module_init(void) continue; brd = &mxser_boards[m]; - retval = mxser_get_ISA_conf(ioaddr[b], brd); + retval = mxser_get_ISA_conf(!ioaddr[b], brd); if (retval <= 0) { brd->info = NULL; continue; diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 6e2ec0b18948..47d2ad0ae079 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ret = -ENOMEM; goto nomem_out; } - if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { + if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { free_cpumask_var(policy->cpus); kfree(policy); ret = -ENOMEM; diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 8fab7890a363..94a768871734 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -2294,12 +2294,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, } } - /* Do DPMS ourselves */ - if (property == connector->dev->mode_config.dpms_property) { - if (connector->funcs->dpms) - (*connector->funcs->dpms)(connector, (int) out_resp->value); - ret = 0; - } else if (connector->funcs->set_property) + if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, out_resp->value); /* store the property value if succesful */ diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c index a6f73f1e99d9..45890447feec 100644 --- a/trunk/drivers/gpu/drm/drm_crtc_helper.c +++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c @@ -198,29 +198,6 @@ static void drm_helper_add_std_modes(struct drm_device *dev, } } -/** - * drm_helper_encoder_in_use - check if a given encoder is in use - * @encoder: encoder to check - * - * 
LOCKING: - * Caller must hold mode config lock. - * - * Walk @encoders's DRM device's mode_config and see if it's in use. - * - * RETURNS: - * True if @encoder is part of the mode_config, false otherwise. - */ -bool drm_helper_encoder_in_use(struct drm_encoder *encoder) -{ - struct drm_connector *connector; - struct drm_device *dev = encoder->dev; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - return true; - return false; -} -EXPORT_SYMBOL(drm_helper_encoder_in_use); - /** * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config * @crtc: CRTC to check @@ -239,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; /* FIXME: Locking around list access? */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) + if (encoder->crtc == crtc) return true; return false; } @@ -263,7 +240,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { encoder_funcs = encoder->helper_private; - if (!drm_helper_encoder_in_use(encoder)) + if (!encoder->crtc) (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); } @@ -958,88 +935,6 @@ bool drm_helper_initial_config(struct drm_device *dev) } EXPORT_SYMBOL(drm_helper_initial_config); -static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) -{ - int dpms = DRM_MODE_DPMS_OFF; - struct drm_connector *connector; - struct drm_device *dev = encoder->dev; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - if (connector->dpms < dpms) - dpms = connector->dpms; - return dpms; -} - -static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) -{ - int dpms = DRM_MODE_DPMS_OFF; - struct drm_connector *connector; - struct drm_device *dev = crtc->dev; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder && connector->encoder->crtc == crtc) - if (connector->dpms < dpms) - dpms = connector->dpms; - return dpms; -} - -/** - * drm_helper_connector_dpms - * @connector affected connector - * @mode DPMS mode - * - * Calls the low-level connector DPMS function, then - * calls appropriate encoder and crtc DPMS functions as well - */ -void drm_helper_connector_dpms(struct drm_connector *connector, int mode) -{ - struct drm_encoder *encoder = connector->encoder; - struct drm_crtc *crtc = encoder ? 
encoder->crtc : NULL; - int old_dpms; - - if (mode == connector->dpms) - return; - - old_dpms = connector->dpms; - connector->dpms = mode; - - /* from off to on, do crtc then encoder */ - if (mode < old_dpms) { - if (crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - if (crtc_funcs->dpms) - (*crtc_funcs->dpms) (crtc, - drm_helper_choose_crtc_dpms(crtc)); - } - if (encoder) { - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; - if (encoder_funcs->dpms) - (*encoder_funcs->dpms) (encoder, - drm_helper_choose_encoder_dpms(encoder)); - } - } - - /* from on to off, do encoder then crtc */ - if (mode > old_dpms) { - if (encoder) { - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; - if (encoder_funcs->dpms) - (*encoder_funcs->dpms) (encoder, - drm_helper_choose_encoder_dpms(encoder)); - } - if (crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - if (crtc_funcs->dpms) - (*crtc_funcs->dpms) (crtc, - drm_helper_choose_crtc_dpms(crtc)); - } - } - - return; -} -EXPORT_SYMBOL(drm_helper_connector_dpms); - /** * drm_hotplug_stage_two * @dev DRM device diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index 6f6b26479d82..ca9c61656714 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -289,11 +289,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, struct drm_display_mode *mode; struct detailed_pixel_timing *pt = &timing->data.pixel_data; - /* ignore tiny modes */ - if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 || - ((pt->vactive_hi << 8) | pt->hactive_lo) < 64) - return NULL; - if (pt->stereo) { printk(KERN_WARNING "stereo mode not supported\n"); return NULL; diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index fc8e5acd9d9a..93e677a481f5 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -196,7 +196,6 @@ int drm_irq_install(struct drm_device *dev) { int ret = 0; unsigned long sh_flags = 0; - char *irqname; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -228,13 +227,8 @@ int drm_irq_install(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) sh_flags = IRQF_SHARED; - if (dev->devname) - irqname = dev->devname; - else - irqname = dev->driver->name; - ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, - sh_flags, irqname, dev); + sh_flags, dev->devname, dev); if (ret < 0) { mutex_lock(&dev->struct_mutex); diff --git a/trunk/drivers/gpu/drm/drm_sysfs.c b/trunk/drivers/gpu/drm/drm_sysfs.c index 9987ab880835..8f9372921f82 100644 --- a/trunk/drivers/gpu/drm/drm_sysfs.c +++ b/trunk/drivers/gpu/drm/drm_sysfs.c @@ -147,7 +147,7 @@ static ssize_t status_show(struct device *device, enum drm_connector_status status; status = connector->funcs->detect(connector); - return snprintf(buf, PAGE_SIZE, "%s\n", + return snprintf(buf, PAGE_SIZE, "%s", drm_get_connector_status_name(status)); } @@ -166,7 +166,7 @@ static ssize_t dpms_show(struct device *device, if (ret) return 0; - return snprintf(buf, PAGE_SIZE, "%s\n", + return snprintf(buf, PAGE_SIZE, "%s", drm_get_dpms_name((int)dpms_status)); } @@ -176,7 +176,7 @@ static ssize_t enabled_show(struct device *device, { struct drm_connector *connector = to_drm_connector(device); - return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" : + return snprintf(buf, PAGE_SIZE, connector->encoder ? 
"enabled" : "disabled"); } @@ -317,7 +317,6 @@ static struct device_attribute connector_attrs_opt1[] = { static struct bin_attribute edid_attr = { .attr.name = "edid", - .attr.mode = 0444, .size = 128, .read = edid_show, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index 0ccb63ee50ee..53d544552625 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -987,6 +987,12 @@ static int i915_load_modeset_init(struct drm_device *dev) int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; + dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL); + if (!dev->devname) { + ret = -ENOMEM; + goto out; + } + dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 0xff000000; @@ -1000,7 +1006,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_probe_agp(dev, &agp_size, &prealloc_size); if (ret) - goto out; + goto kfree_devname; /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); @@ -1018,7 +1024,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_gem_init_ringbuffer(dev); if (ret) - goto out; + goto kfree_devname; /* Allow hardware batchbuffers unless told otherwise. */ @@ -1050,6 +1056,8 @@ static int i915_load_modeset_init(struct drm_device *dev) destroy_ringbuffer: i915_gem_cleanup_ringbuffer(dev); +kfree_devname: + kfree(dev->devname); out: return ret; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 39f5c658ef5e..670d12881468 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -2260,6 +2260,9 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) goto try_again; } + BUG_ON(old_obj_priv->active || + (reg->obj->write_domain & I915_GEM_GPU_DOMAINS)); + /* * Zap this virtual mapping so we can set up a fence again * for this object next time we need it. 
diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 79acc4f4c1f8..640f5158effc 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -381,6 +381,11 @@ static int intel_crt_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct drm_device *dev = connector->dev; + + if (property == dev->mode_config.dpms_property && connector->encoder) + intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf)); + return 0; } @@ -397,7 +402,6 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = { }; static const struct drm_connector_funcs intel_crt_connector_funcs = { - .dpms = drm_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_crt_destroy, diff --git a/trunk/drivers/gpu/drm/i915/intel_dvo.c b/trunk/drivers/gpu/drm/i915/intel_dvo.c index 1ee3007d6ec0..8b8d6e65cd3f 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_dvo.c @@ -316,7 +316,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { }; static const struct drm_connector_funcs intel_dvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, .save = intel_dvo_save, .restore = intel_dvo_restore, .detect = intel_dvo_detect, diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 7d6bdd705326..d0983bb93a18 100644 --- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -219,7 +219,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { }; static const struct drm_connector_funcs intel_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, .save = intel_hdmi_save, .restore = intel_hdmi_restore, .detect = intel_hdmi_detect, diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 53cccfa58b95..53731f0ffcb5 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -343,6 +343,11 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct drm_device *dev = connector->dev; + + if (property == dev->mode_config.dpms_property && connector->encoder) + intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf)); + return 0; } @@ -361,7 +366,6 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .dpms = drm_helper_connector_dpms, .save = intel_lvds_save, .restore = intel_lvds_restore, .detect = intel_lvds_detect, @@ -387,7 +391,7 @@ static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) } /* These systems claim to have LVDS, but really don't */ -static const struct dmi_system_id intel_no_lvds[] = { +static const struct dmi_system_id __initdata intel_no_lvds[] = { { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core series)", diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 3093b4d4a4dd..f3ef6bfd8ffc 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -1616,7 +1616,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, .save = 
intel_sdvo_save, .restore = intel_sdvo_restore, .detect = intel_sdvo_detect, diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index 98ac0546b7bd..d2c32983242d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -1626,7 +1626,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { }; static const struct drm_connector_funcs intel_tv_connector_funcs = { - .dpms = drm_helper_connector_dpms, .save = intel_tv_save, .restore = intel_tv_restore, .detect = intel_tv_detect, diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cp.c b/trunk/drivers/gpu/drm/radeon/radeon_cp.c index aff90bb96488..77a7a4d84650 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cp.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cp.c @@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv) /* check if the ring is padded out to 16-dword alignment */ - tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1); + tail_aligned = dev_priv->ring.tail & 0xf; if (tail_aligned) { - int num_p2 = RADEON_RING_ALIGN - tail_aligned; + int num_p2 = 16 - tail_aligned; ring = dev_priv->ring.start; /* pad with some CP_PACKET2 */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.h b/trunk/drivers/gpu/drm/radeon/radeon_drv.h index 0c6bfc1de153..8071d965f142 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.h @@ -1964,14 +1964,11 @@ do { \ #define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; -#define RADEON_RING_ALIGN 16 - #define BEGIN_RING( n ) do { \ if ( RADEON_VERBOSE ) { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ - _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \ - _align_nr += n; \ + _align_nr = (n + 0xf) & ~0xf; \ if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ COMMIT_RING(); \ radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ diff --git a/trunk/drivers/ide/pdc202xx_old.c b/trunk/drivers/ide/pdc202xx_old.c index b3bc96f930a6..248a54bd2386 100644 --- a/trunk/drivers/ide/pdc202xx_old.c +++ b/trunk/drivers/ide/pdc202xx_old.c @@ -1,6 +1,6 @@ /* * Copyright (C) 1998-2002 Andre Hedrick - * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc. + * Copyright (C) 2006-2007 MontaVista Software, Inc. * Copyright (C) 2007 Bartlomiej Zolnierkiewicz * * Portions Copyright (C) 1999 Promise Technology, Inc. @@ -227,19 +227,28 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive) return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ } -static void pdc202xx_reset(ide_drive_t *drive) +static void pdc202xx_reset_host (ide_hwif_t *hwif) { - ide_hwif_t *hwif = drive->hwif; unsigned long high_16 = hwif->extra_base - 16; u8 udma_speed_flag = inb(high_16 | 0x001f); - printk(KERN_WARNING "PDC202xx: software reset...\n"); - outb(udma_speed_flag | 0x10, high_16 | 0x001f); mdelay(100); outb(udma_speed_flag & ~0x10, high_16 | 0x001f); mdelay(2000); /* 2 seconds ?! */ + printk(KERN_WARNING "PDC202XX: %s channel reset.\n", + hwif->channel ? 
"Secondary" : "Primary"); +} + +static void pdc202xx_reset (ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + ide_hwif_t *mate = hwif->mate; + + pdc202xx_reset_host(hwif); + pdc202xx_reset_host(mate); + ide_set_max_pio(drive); } @@ -319,8 +328,9 @@ static const struct ide_dma_ops pdc20246_dma_ops = { .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = pdc202xx_dma_test_irq, - .dma_lost_irq = ide_dma_lost_irq, + .dma_lost_irq = pdc202xx_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, + .dma_clear = pdc202xx_reset, .dma_sff_read_status = ide_dma_sff_read_status, }; diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index bb37fb1b2d82..5d400aef8d9b 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q); static struct stripe_head * get_active_stripe(raid5_conf_t *conf, sector_t sector, - int previous, int noblock, int noquiesce) + int previous, int noblock) { struct stripe_head *sh; @@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, do { wait_event_lock_irq(conf->wait_for_stripe, - conf->quiesce == 0 || noquiesce, + conf->quiesce == 0, conf->device_lock, /* nothing */); sh = __find_stripe(conf, sector, conf->generation - previous); if (!sh) { @@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, sector_t bn = compute_blocknr(sh, i, 1); sector_t s = raid5_compute_sector(conf, bn, 0, &dd_idx, NULL); - sh2 = get_active_stripe(conf, s, 0, 1, 1); + sh2 = get_active_stripe(conf, s, 0, 1); if (sh2 == NULL) /* so far only the early blocks of this stripe * have been requested. When later blocks @@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh) /* Finish reconstruct operations initiated by the expansion process */ if (sh->reconstruct_state == reconstruct_state_result) { struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1, 1); + = get_active_stripe(conf, sh->sector, 1, 1); if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { /* sh cannot be written until sh2 has been read. * so arrange for sh to be delayed a little @@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { struct stripe_head *sh2 - = get_active_stripe(conf, sh->sector, 1, 1, 1); + = get_active_stripe(conf, sh->sector, 1, 1); if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { /* sh cannot be written until sh2 has been read. 
* so arrange for sh to be delayed a little @@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev) int i; rcu_read_lock(); - for (i = 0; i < conf->raid_disks; i++) { + for (i=0; i<conf->raid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { struct request_queue *r_queue = bdev_get_queue(rdev->bdev); @@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi) (unsigned long long)logical_sector); sh = get_active_stripe(conf, new_sector, previous, - (bi->bi_rw&RWA_MASK), 0); + (bi->bi_rw&RWA_MASK)); if (sh) { if (unlikely(previous)) { /* expansion might have moved on while waiting for a @@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { int j; int skipped = 0; - sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); + sh = get_active_stripe(conf, stripe_addr+i, 0, 0); set_bit(STRIPE_EXPANDING, &sh->state); atomic_inc(&conf->reshape_stripes); /* If any of this stripe is beyond the end of the old @@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped raid5_compute_sector(conf, stripe_addr*(new_data_disks), 1, &dd_idx, NULL); last_sector = - raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) + raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) *(new_data_disks) - 1), 1, &dd_idx, NULL); if (last_sector >= mddev->dev_sectors) last_sector = mddev->dev_sectors - 1; while (first_sector <= last_sector) { - sh = get_active_stripe(conf, first_sector, 1, 0, 1); + sh = get_active_stripe(conf, first_sector, 1, 0); set_bit(STRIPE_EXPAND_SOURCE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); @@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski bitmap_cond_end_sync(mddev->bitmap, sector_nr); - sh = get_active_stripe(conf, sector_nr, 0, 1, 0); + sh = get_active_stripe(conf, sector_nr, 0, 1); if (sh == NULL) { - sh = get_active_stripe(conf, sector_nr, 0, 0, 0); + sh = get_active_stripe(conf, sector_nr, 0, 0); /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ @@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski * We don't need to check the 'failed' flag as when that gets set, * recovery aborts. 
*/ - for (i = 0; i < conf->raid_disks; i++) + for (i=0; i<conf->raid_disks; i++) if (conf->disks[i].rdev == NULL) still_degraded = 1; @@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) /* already done this stripe */ continue; - sh = get_active_stripe(conf, sector, 0, 1, 0); + sh = get_active_stripe(conf, sector, 0, 1); if (!sh) { /* failed to get a stripe - must wait */ diff --git a/trunk/drivers/media/video/ivtv/ivtv-queue.c b/trunk/drivers/media/video/ivtv/ivtv-queue.c index 7fde36e6d227..ff7b7deded4f 100644 --- a/trunk/drivers/media/video/ivtv/ivtv-queue.c +++ b/trunk/drivers/media/video/ivtv/ivtv-queue.c @@ -230,8 +230,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s) return -ENOMEM; } if (ivtv_might_use_dma(s)) { - s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, - sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE); + s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma); ivtv_stream_sync_for_cpu(s); } diff --git a/trunk/drivers/mmc/host/mvsdio.c b/trunk/drivers/mmc/host/mvsdio.c index b56d72ff06e9..c643d0fe118f 100644 --- a/trunk/drivers/mmc/host/mvsdio.c +++ b/trunk/drivers/mmc/host/mvsdio.c @@ -64,31 +64,6 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) unsigned int tmout; int tmout_index; - /* - * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE - * register is sometimes not set before a while when some - * "unusual" data block sizes are used (such as with the SWITCH - * command), even despite the fact that the XFER_DONE interrupt - * was raised. And if another data transfer starts before - * this bit comes to good sense (which eventually happens by - * itself) then the new transfer simply fails with a timeout. - */ - if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) { - unsigned long t = jiffies + HZ; - unsigned int hw_state, count = 0; - do { - if (time_after(jiffies, t)) { - dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); - break; - } - hw_state = mvsd_read(MVSD_HW_STATE); - count++; - } while (!(hw_state & (1 << 13))); - dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " - "(hw=0x%04x, count=%d, jiffies=%ld)\n", - hw_state, count, jiffies - (t - HZ)); - } - /* If timeout=0 then maximum timeout index is used. */ tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); tmout += data->timeout_clks; @@ -645,18 +620,9 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_width == MMC_BUS_WIDTH_4) ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; - /* - * The HI_SPEED_EN bit is causing trouble with many (but not all) - * high speed SD, SDHC and SDIO cards. Not enabling that bit - * makes all cards work. So let's just ignore that bit for now - * and revisit this issue if problems for not enabling this bit - * are ever reported. 
- */ -#if 0 if (ios->timing == MMC_TIMING_MMC_HS || ios->timing == MMC_TIMING_SD_HS) ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; -#endif host->ctrl = ctrl_reg; mvsd_write(MVSD_HOST_CTRL, ctrl_reg); @@ -916,4 +882,3 @@ module_param(nodma, int, 0); MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:mvsdio"); diff --git a/trunk/drivers/mmc/host/mxcmmc.c b/trunk/drivers/mmc/host/mxcmmc.c index f4cbe473670e..b4a615c55f28 100644 --- a/trunk/drivers/mmc/host/mxcmmc.c +++ b/trunk/drivers/mmc/host/mxcmmc.c @@ -140,8 +140,6 @@ struct mxcmci_host { struct work_struct datawork; }; -static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); - static inline int mxcmci_use_dma(struct mxcmci_host *host) { return host->do_dma; @@ -162,7 +160,7 @@ static void mxcmci_softreset(struct mxcmci_host *host) writew(0xff, host->base + MMC_REG_RES_TO); } -static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) +static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) { unsigned int nob = data->blocks; unsigned int blksz = data->blksz; @@ -170,7 +168,6 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) #ifdef HAS_DMA struct scatterlist *sg; int i; - int ret; #endif if (data->flags & MMC_DATA_STREAM) nob = 0xffff; @@ -186,7 +183,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 3 || sg->length & 3) { host->do_dma = 0; - return 0; + return; } } @@ -195,30 +192,23 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); - ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, - datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_READ); + imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, + host->res->start + MMC_REG_BUFFER_ACCESS, + DMA_MODE_READ); } else { host->dma_dir = DMA_TO_DEVICE; host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); - ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, - datasize, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_WRITE); + imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, + host->res->start + MMC_REG_BUFFER_ACCESS, + DMA_MODE_WRITE); } - if (ret) { - dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); - return ret; - } wmb(); imx_dma_enable(host->dma); #endif /* HAS_DMA */ - return 0; } static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, @@ -355,11 +345,8 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) stat = readl(host->base + MMC_REG_STATUS); if (stat & STATUS_ERR_MASK) return stat; - if (time_after(jiffies, timeout)) { - mxcmci_softreset(host); - mxcmci_set_clk_rate(host, host->clock); + if (time_after(jiffies, timeout)) return STATUS_TIME_OUT_READ; - } if (stat & mask) return 0; cpu_relax(); @@ -544,7 +531,6 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) { struct mxcmci_host *host = mmc_priv(mmc); unsigned int cmdat = host->cmdat; - int error; WARN_ON(host->req != NULL); @@ -554,12 +540,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) host->do_dma = 1; #endif if (req->data) { - error = mxcmci_setup_data(host, req->data); - if (error) { - req->cmd->error = 
error; - goto out; - } - + mxcmci_setup_data(host, req->data); cmdat |= CMD_DAT_CONT_DATA_ENABLE; @@ -567,9 +548,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) cmdat |= CMD_DAT_CONT_WRITE; } - error = mxcmci_start_cmd(host, req->cmd, cmdat); -out: - if (error) + if (mxcmci_start_cmd(host, req->cmd, cmdat)) mxcmci_finish_request(host, req); } @@ -745,9 +724,7 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_clk_put; } - mmc->f_min = clk_get_rate(host->clk) >> 16; - if (mmc->f_min < 400000) - mmc->f_min = 400000; + mmc->f_min = clk_get_rate(host->clk) >> 7; mmc->f_max = clk_get_rate(host->clk) >> 1; /* recommended in data sheet */ diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index dceb5ee3bda0..bfa25c01c872 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) del_timer(&host->cmd_abort_timer); host->abort = 1; OMAP_MMC_WRITE(host, IE, 0); - disable_irq_nosync(host->irq); + disable_irq(host->irq); schedule_work(&host->cmd_abort_work); return IRQ_HANDLED; } diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index c40cb96255a2..e62a22a7f00c 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) host->dma_ch = -1; /* * DMA Callback: run in interrupt context. - * mutex_unlock will throw a kernel warning if used. + * mutex_unlock will through a kernel warning if used. */ up(&host->sem); } diff --git a/trunk/drivers/mmc/host/sdhci-of.c b/trunk/drivers/mmc/host/sdhci-of.c index 128c614d11aa..3ff4ac3abe8b 100644 --- a/trunk/drivers/mmc/host/sdhci-of.c +++ b/trunk/drivers/mmc/host/sdhci-of.c @@ -55,13 +55,7 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg) static u16 esdhc_readw(struct sdhci_host *host, int reg) { - u16 ret; - - if (unlikely(reg == SDHCI_HOST_VERSION)) - ret = in_be16(host->ioaddr + reg); - else - ret = in_be16(host->ioaddr + (reg ^ 0x2)); - return ret; + return in_be16(host->ioaddr + (reg ^ 0x2)); } static u8 esdhc_readb(struct sdhci_host *host, int reg) @@ -283,7 +277,6 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev) static const struct of_device_id sdhci_of_match[] = { { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, - { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, { .compatible = "generic-sdhci", }, {}, }; diff --git a/trunk/drivers/mtd/nand/davinci_nand.c b/trunk/drivers/mtd/nand/davinci_nand.c index 02700f769b8a..0119220de7d0 100644 --- a/trunk/drivers/mtd/nand/davinci_nand.c +++ b/trunk/drivers/mtd/nand/davinci_nand.c @@ -407,17 +407,16 @@ static int __init nand_davinci_probe(struct platform_device *pdev) } info->chip.ecc.mode = ecc_mode; - info->clk = clk_get(&pdev->dev, "aemif"); + info->clk = clk_get(&pdev->dev, "AEMIFCLK"); if (IS_ERR(info->clk)) { ret = PTR_ERR(info->clk); - dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); + dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret); goto err_clk; } ret = clk_enable(info->clk); if (ret < 0) { - dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", - ret); + dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret); goto err_clk_enable; } diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c index 
fffb006b7d95..b1419e21b46b 100644 --- a/trunk/drivers/net/e1000/e1000_main.c +++ b/trunk/drivers/net/e1000/e1000_main.c @@ -4027,9 +4027,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, PCI_DMA_FROMDEVICE); length = le16_to_cpu(rx_desc->length); - /* !EOP means multiple descriptors were used to store a single - * packet, also make sure the frame isn't just CRC only */ - if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { + + if (unlikely(!(status & E1000_RXD_STAT_EOP))) { /* All receives must fit into a single buffer */ E1000_DBG("%s: Receive packet consumed multiple" " buffers\n", netdev->name); diff --git a/trunk/drivers/net/forcedeth.c b/trunk/drivers/net/forcedeth.c index 9f6a68fb7b45..f9a846b1b92f 100644 --- a/trunk/drivers/net/forcedeth.c +++ b/trunk/drivers/net/forcedeth.c @@ -897,12 +897,6 @@ enum { }; static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; -/* - * Power down phy when interface is down (persists through reboot; - * older Linux and other OSes may not power it up again) - */ -static int phy_power_down = 0; - static inline struct fe_priv *get_nvpriv(struct net_device *dev) { return netdev_priv(dev); @@ -1491,10 +1485,7 @@ static int phy_init(struct net_device *dev) /* restart auto negotiation, power down phy */ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); - mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); - if (phy_power_down) { - mii_control |= BMCR_PDOWN; - } + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN); if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { return PHY_ERROR; } @@ -5522,7 +5513,7 @@ static int nv_close(struct net_device *dev) nv_drain_rxtx(dev); - if (np->wolenabled || !phy_power_down) { + if (np->wolenabled) { writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); nv_start_rx(dev); } else { @@ -6376,8 +6367,6 @@ module_param(dma_64bit, int, 0); MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); module_param(phy_cross, int, 0); MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); -module_param(phy_power_down, int, 0); -MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); MODULE_AUTHOR("Manfred Spraul "); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); diff --git a/trunk/drivers/net/r8169.c b/trunk/drivers/net/r8169.c index 3b19e0ce290f..8247a945a1d9 100644 --- a/trunk/drivers/net/r8169.c +++ b/trunk/drivers/net/r8169.c @@ -66,6 +66,7 @@ static const int multicast_filter_limit = 32; #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ #define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ +#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -2356,10 +2357,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) return cmd; } -static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +static void rtl_set_rx_max_size(void __iomem *ioaddr) { /* Low hurts. Let's disable the filtering. 
*/ - RTL_W16(RxMaxSize, rx_buf_sz); + RTL_W16(RxMaxSize, 16383); } static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) @@ -2406,7 +2407,7 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr); if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || (tp->mac_version == RTL_GIGA_MAC_VER_02) || @@ -2667,7 +2668,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr); tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; @@ -2845,7 +2846,7 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(EarlyTxThres, EarlyTxThld); - rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); + rtl_set_rx_max_size(ioaddr); tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; diff --git a/trunk/drivers/parisc/iosapic.c b/trunk/drivers/parisc/iosapic.c index 4a9cc92d4d18..73348c4047e9 100644 --- a/trunk/drivers/parisc/iosapic.c +++ b/trunk/drivers/parisc/iosapic.c @@ -702,7 +702,7 @@ static unsigned int iosapic_startup_irq(unsigned int irq) } #ifdef CONFIG_SMP -static int iosapic_set_affinity_irq(unsigned int irq, +static void iosapic_set_affinity_irq(unsigned int irq, const struct cpumask *dest) { struct vector_info *vi = iosapic_get_vector(irq); @@ -712,7 +712,7 @@ static int iosapic_set_affinity_irq(unsigned int irq, dest_cpu = cpu_check_affinity(irq, dest); if (dest_cpu < 0) - return -1; + return; cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu)); vi->txn_addr = txn_affinity_addr(irq, dest_cpu); @@ -724,8 +724,6 @@ static int iosapic_set_affinity_irq(unsigned int irq, iosapic_set_irt_data(vi, &dummy_d0, &d1); iosapic_wr_irt_entry(vi, d0, d1); spin_unlock_irqrestore(&iosapic_lock, flags); - - return 0; } #endif diff --git a/trunk/drivers/parport/share.c b/trunk/drivers/parport/share.c index dffa5d4fb298..0ebca450ed29 100644 --- a/trunk/drivers/parport/share.c +++ b/trunk/drivers/parport/share.c @@ -614,10 +614,7 @@ parport_register_device(struct parport *port, const char *name, * pardevice fields. 
-arca */ port->ops->init_state(tmp, tmp->state); - if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { - port->proc_device = tmp; - parport_device_proc_register(tmp); - } + parport_device_proc_register(tmp); return tmp; out_free_all: @@ -649,13 +646,9 @@ void parport_unregister_device(struct pardevice *dev) } #endif - port = dev->port->physport; + parport_device_proc_unregister(dev); - if (port->proc_device == dev) { - port->proc_device = NULL; - clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags); - parport_device_proc_unregister(dev); - } + port = dev->port->physport; if (port->cad == dev) { printk(KERN_DEBUG "%s: %s forgot to release port\n", diff --git a/trunk/drivers/pci/hotplug/ibmphp_core.c b/trunk/drivers/pci/hotplug/ibmphp_core.c index 42e4260c3b12..dd18f857dfb0 100644 --- a/trunk/drivers/pci/hotplug/ibmphp_core.c +++ b/trunk/drivers/pci/hotplug/ibmphp_core.c @@ -153,47 +153,45 @@ int ibmphp_init_devno(struct slot **cur_slot) return -1; } for (loop = 0; loop < len; loop++) { - if ((*cur_slot)->number == rtable->slots[loop].slot && - (*cur_slot)->bus == rtable->slots[loop].bus) { - struct io_apic_irq_attr irq_attr; - + if ((*cur_slot)->number == rtable->slots[loop].slot) { + if ((*cur_slot)->bus == rtable->slots[loop].bus) { (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); for (i = 0; i < 4; i++) (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, - (int) (*cur_slot)->device, i, - &irq_attr); - - debug("(*cur_slot)->irq[0] = %x\n", - (*cur_slot)->irq[0]); - debug("(*cur_slot)->irq[1] = %x\n", - (*cur_slot)->irq[1]); - debug("(*cur_slot)->irq[2] = %x\n", - (*cur_slot)->irq[2]); - debug("(*cur_slot)->irq[3] = %x\n", - (*cur_slot)->irq[3]); - - debug("rtable->exlusive_irqs = %x\n", + (int) (*cur_slot)->device, i); + + debug("(*cur_slot)->irq[0] = %x\n", + (*cur_slot)->irq[0]); + debug("(*cur_slot)->irq[1] = %x\n", + (*cur_slot)->irq[1]); + debug("(*cur_slot)->irq[2] = %x\n", + (*cur_slot)->irq[2]); + debug("(*cur_slot)->irq[3] = %x\n", + (*cur_slot)->irq[3]); + + debug("rtable->exlusive_irqs = %x\n", rtable->exclusive_irqs); - debug("rtable->slots[loop].irq[0].bitmap = %x\n", + debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); - debug("rtable->slots[loop].irq[1].bitmap = %x\n", + debug("rtable->slots[loop].irq[1].bitmap = %x\n", rtable->slots[loop].irq[1].bitmap); - debug("rtable->slots[loop].irq[2].bitmap = %x\n", + debug("rtable->slots[loop].irq[2].bitmap = %x\n", rtable->slots[loop].irq[2].bitmap); - debug("rtable->slots[loop].irq[3].bitmap = %x\n", + debug("rtable->slots[loop].irq[3].bitmap = %x\n", rtable->slots[loop].irq[3].bitmap); - debug("rtable->slots[loop].irq[0].link = %x\n", + debug("rtable->slots[loop].irq[0].link = %x\n", rtable->slots[loop].irq[0].link); - debug("rtable->slots[loop].irq[1].link = %x\n", + debug("rtable->slots[loop].irq[1].link = %x\n", rtable->slots[loop].irq[1].link); - debug("rtable->slots[loop].irq[2].link = %x\n", + debug("rtable->slots[loop].irq[2].link = %x\n", rtable->slots[loop].irq[2].link); - debug("rtable->slots[loop].irq[3].link = %x\n", + debug("rtable->slots[loop].irq[3].link = %x\n", rtable->slots[loop].irq[3].link); - debug("end of init_devno\n"); - kfree(rtable); - return 0; + debug("end of init_devno\n"); + kfree(rtable); + return 0; + } } } diff --git a/trunk/drivers/pci/htirq.c b/trunk/drivers/pci/htirq.c index 737a1c44b07a..6808d8333ecc 100644 --- a/trunk/drivers/pci/htirq.c +++ b/trunk/drivers/pci/htirq.c @@ -98,7 +98,6 @@ int 
__ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) int max_irq; int pos; int irq; - int node; pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); if (!pos) @@ -126,8 +125,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) cfg->msg.address_lo = 0xffffffff; cfg->msg.address_hi = 0xffffffff; - node = dev_to_node(&dev->dev); - irq = create_irq_nr(0, node); + irq = create_irq(); if (irq <= 0) { kfree(cfg); diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c index cd389162735f..a563fbe559d0 100644 --- a/trunk/drivers/pci/intel-iommu.c +++ b/trunk/drivers/pci/intel-iommu.c @@ -1972,6 +1972,15 @@ static int __init init_dmars(void) } } +#ifdef CONFIG_INTR_REMAP + if (!intr_remapping_enabled) { + ret = enable_intr_remapping(0); + if (ret) + printk(KERN_ERR + "IOMMU: enable interrupt remapping failed\n"); + } +#endif + /* * For each rmrr * for each dev attached to rmrr diff --git a/trunk/drivers/pci/intr_remapping.c b/trunk/drivers/pci/intr_remapping.c index 3a0cb0bb0593..f5e0ea724a6f 100644 --- a/trunk/drivers/pci/intr_remapping.c +++ b/trunk/drivers/pci/intr_remapping.c @@ -15,14 +15,6 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; static int ir_ioapic_num; int intr_remapping_enabled; -static int disable_intremap; -static __init int setup_nointremap(char *str) -{ - disable_intremap = 1; - return 0; -} -early_param("nointremap", setup_nointremap); - struct irq_2_iommu { struct intel_iommu *iommu; u16 irte_index; @@ -31,12 +23,15 @@ struct irq_2_iommu { }; #ifdef CONFIG_GENERIC_HARDIRQS -static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) +static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) { struct irq_2_iommu *iommu; + int node; + + node = cpu_to_node(cpu); iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); - printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); + printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); return iommu; } @@ -53,7 +48,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq) return desc->irq_2_iommu; } -static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) +static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) { struct irq_desc *desc; struct irq_2_iommu *irq_iommu; @@ -61,7 +56,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) /* * alloc irq desc if not allocated already. 
*/ - desc = irq_to_desc_alloc_node(irq, node); + desc = irq_to_desc_alloc_cpu(irq, cpu); if (!desc) { printk(KERN_INFO "can not get irq_desc for %d\n", irq); return NULL; @@ -70,14 +65,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) irq_iommu = desc->irq_2_iommu; if (!irq_iommu) - desc->irq_2_iommu = get_one_free_irq_2_iommu(node); + desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); return desc->irq_2_iommu; } static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) { - return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id)); + return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); } #else /* !CONFIG_SPARSE_IRQ */ @@ -428,6 +423,20 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) readl, (sts & DMA_GSTS_IRTPS), sts); spin_unlock_irqrestore(&iommu->register_lock, flags); + if (mode == 0) { + spin_lock_irqsave(&iommu->register_lock, flags); + + /* enable comaptiblity format interrupt pass through */ + cmd = iommu->gcmd | DMA_GCMD_CFI; + iommu->gcmd |= DMA_GCMD_CFI; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_CFIS), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); + } + /* * global invalidation of interrupt entry cache before enabling * interrupt-remapping. @@ -507,23 +516,6 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) spin_unlock_irqrestore(&iommu->register_lock, flags); } -int __init intr_remapping_supported(void) -{ - struct dmar_drhd_unit *drhd; - - if (disable_intremap) - return 0; - - for_each_drhd_unit(drhd) { - struct intel_iommu *iommu = drhd->iommu; - - if (!ecap_ir_support(iommu->ecap)) - return 0; - } - - return 1; -} - int __init enable_intr_remapping(int eim) { struct dmar_drhd_unit *drhd; diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index f1ae2475ffff..e3c3e081b834 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -745,8 +745,6 @@ int pci_setup_device(struct pci_dev *dev) /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); - /* device class may be changed after fixup */ - class = dev->class >> 8; switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ diff --git a/trunk/drivers/pnp/pnpacpi/rsparser.c b/trunk/drivers/pnp/pnpacpi/rsparser.c index 7f207f335bec..adf17856bacc 100644 --- a/trunk/drivers/pnp/pnpacpi/rsparser.c +++ b/trunk/drivers/pnp/pnpacpi/rsparser.c @@ -123,7 +123,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev, } flags = irq_flags(triggering, polarity, shareable); - irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); + irq = acpi_register_gsi(gsi, triggering, polarity); if (irq >= 0) pcibios_penalize_isa_irq(irq, 1); else diff --git a/trunk/drivers/serial/imx.c b/trunk/drivers/serial/imx.c index 5f0be40dfdab..9f460b175c50 100644 --- a/trunk/drivers/serial/imx.c +++ b/trunk/drivers/serial/imx.c @@ -1031,8 +1031,6 @@ imx_console_setup(struct console *co, char *options) if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) co->index = 0; sport = imx_ports[co->index]; - if(sport == NULL) - return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); diff --git a/trunk/drivers/ssb/embedded.c b/trunk/drivers/ssb/embedded.c index a0e0d246b592..7dc3a6b41397 100644 --- a/trunk/drivers/ssb/embedded.c +++ b/trunk/drivers/ssb/embedded.c @@ -29,7 +29,6 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks) } return 
-ENODEV; } -EXPORT_SYMBOL(ssb_watchdog_timer_set); u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) { diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index be437c2bc942..30963af5dba0 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -335,7 +335,7 @@ static int find_unbound_irq(void) if (irq == nr_irqs) panic("No available IRQ to bind to: increase nr_irqs!\n"); - desc = irq_to_desc_alloc_node(irq, 0); + desc = irq_to_desc_alloc_cpu(irq, 0); if (WARN_ON(desc == NULL)) return -1; @@ -688,13 +688,13 @@ void rebind_evtchn_irq(int evtchn, int irq) } /* Rebind an evtchn so that it gets delivered to a specific cpu */ -static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) +static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) { struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); if (!VALID_EVTCHN(evtchn)) - return -1; + return; /* Send future instances of this interrupt to other vcpu. */ bind_vcpu.port = evtchn; @@ -707,15 +707,13 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); - - return 0; } -static int set_affinity_irq(unsigned irq, const struct cpumask *dest) + +static void set_affinity_irq(unsigned irq, const struct cpumask *dest) { unsigned tcpu = cpumask_first(dest); - - return rebind_irq_to_cpu(irq, tcpu); + rebind_irq_to_cpu(irq, tcpu); } int resend_irq_on_evtchn(unsigned int irq) diff --git a/trunk/fs/autofs4/waitq.c b/trunk/fs/autofs4/waitq.c index 2341375386f8..eeb246845909 100644 --- a/trunk/fs/autofs4/waitq.c +++ b/trunk/fs/autofs4/waitq.c @@ -297,14 +297,20 @@ static int validate_request(struct autofs_wait_queue **wait, */ if (notify == NFY_MOUNT) { /* - * If the dentry was successfully mounted while we slept - * on the wait queue mutex we can return success. If it - * isn't mounted (doesn't have submounts for the case of - * a multi-mount with no mount at it's base) we can - * continue on and create a new request. - */ - if (have_submounts(dentry)) - return 0; + * If the dentry isn't hashed just go ahead and try the + * mount again with a new wait (not much else we can do). + */ + if (!d_unhashed(dentry)) { + /* + * But if the dentry is hashed, that means that we + * got here through the revalidate path. Thus, we + * need to check if the dentry has been mounted + * while we waited on the wq_mutex. If it has, + * simply return success. 
+ */ + if (d_mountpoint(dentry)) + return 0; + } } return 1; diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 35af93355063..3e2c7c738f23 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -2622,18 +2622,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, search_start); if (block_group && block_group_bits(block_group, data)) { down_read(&space_info->groups_sem); - if (list_empty(&block_group->list) || - block_group->ro) { - /* - * someone is removing this block group, - * we can't jump into the have_block_group - * target because our list pointers are not - * valid - */ - btrfs_put_block_group(block_group); - up_read(&space_info->groups_sem); - } else - goto have_block_group; + goto have_block_group; } else if (block_group) { btrfs_put_block_group(block_group); } @@ -2667,13 +2656,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, * people trying to start a new cluster */ spin_lock(&last_ptr->refill_lock); - if (last_ptr->block_group && - (last_ptr->block_group->ro || - !block_group_bits(last_ptr->block_group, data))) { - offset = 0; - goto refill_cluster; - } - offset = btrfs_alloc_from_cluster(block_group, last_ptr, num_bytes, search_start); if (offset) { @@ -2699,17 +2681,10 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, last_ptr_loop = 1; search_start = block_group->key.objectid; - /* - * we know this block group is properly - * in the list because - * btrfs_remove_block_group, drops the - * cluster before it removes the block - * group from the list - */ goto have_block_group; } spin_unlock(&last_ptr->lock); -refill_cluster: + /* * this cluster didn't work out, free it and * start over @@ -5993,7 +5968,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, { struct btrfs_path *path; struct btrfs_block_group_cache *block_group; - struct btrfs_free_cluster *cluster; struct btrfs_key key; int ret; @@ -6005,21 +5979,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, memcpy(&key, &block_group->key, sizeof(key)); - /* make sure this block group isn't part of an allocation cluster */ - cluster = &root->fs_info->data_alloc_cluster; - spin_lock(&cluster->refill_lock); - btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&cluster->refill_lock); - - /* - * make sure this block group isn't part of a metadata - * allocation cluster - */ - cluster = &root->fs_info->meta_alloc_cluster; - spin_lock(&cluster->refill_lock); - btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&cluster->refill_lock); - path = btrfs_alloc_path(); BUG_ON(!path); @@ -6029,11 +5988,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, spin_unlock(&root->fs_info->block_group_cache_lock); btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); - /* - * we must use list_del_init so people can check to see if they - * are still on the list after taking the semaphore - */ - list_del_init(&block_group->list); + list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); spin_lock(&block_group->space_info->lock); diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c index a6d35b0054ca..5f01dad4b696 100644 --- a/trunk/fs/btrfs/volumes.c +++ b/trunk/fs/btrfs/volumes.c @@ -1440,7 +1440,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->io_align = root->sectorsize; device->sector_size = root->sectorsize; 
device->total_bytes = i_size_read(bdev->bd_inode); - device->disk_total_bytes = device->total_bytes; device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c index 49106127a4aa..aed297739eb0 100644 --- a/trunk/fs/buffer.c +++ b/trunk/fs/buffer.c @@ -2736,8 +2736,6 @@ int nobh_truncate_page(struct address_space *mapping, pos += blocksize; } - map_bh.b_size = blocksize; - map_bh.b_state = 0; err = get_block(inode, iblock, &map_bh, 0); if (err) goto unlock; diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c index bca0c618fdb3..0571983755dc 100644 --- a/trunk/fs/inode.c +++ b/trunk/fs/inode.c @@ -219,7 +219,6 @@ static struct inode *alloc_inode(struct super_block *sb) void destroy_inode(struct inode *inode) { BUG_ON(inode_has_buffers(inode)); - ima_inode_free(inode); security_inode_free(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); @@ -1054,22 +1053,13 @@ int insert_inode_locked(struct inode *inode) struct super_block *sb = inode->i_sb; ino_t ino = inode->i_ino; struct hlist_head *head = inode_hashtable + hash(sb, ino); + struct inode *old; inode->i_state |= I_LOCK|I_NEW; while (1) { - struct hlist_node *node; - struct inode *old = NULL; spin_lock(&inode_lock); - hlist_for_each_entry(old, node, head, i_hash) { - if (old->i_ino != ino) - continue; - if (old->i_sb != sb) - continue; - if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) - continue; - break; - } - if (likely(!node)) { + old = find_inode_fast(sb, head, ino); + if (likely(!old)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; @@ -1091,24 +1081,14 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval, { struct super_block *sb = inode->i_sb; struct hlist_head *head = inode_hashtable + hash(sb, hashval); + struct inode *old; inode->i_state |= I_LOCK|I_NEW; while (1) { - struct hlist_node *node; - struct inode *old = NULL; - spin_lock(&inode_lock); - hlist_for_each_entry(old, node, head, i_hash) { - if (old->i_sb != sb) - continue; - if (!test(old, data)) - continue; - if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) - continue; - break; - } - if (likely(!node)) { + old = find_inode(sb, head, test, data); + if (likely(!old)) { hlist_add_head(&inode->i_hash, head); spin_unlock(&inode_lock); return 0; diff --git a/trunk/fs/jbd/commit.c b/trunk/fs/jbd/commit.c index 618e21c0b7a3..06560c520f49 100644 --- a/trunk/fs/jbd/commit.c +++ b/trunk/fs/jbd/commit.c @@ -241,7 +241,7 @@ static int journal_submit_data_buffers(journal_t *journal, spin_lock(&journal->j_list_lock); } /* Someone already cleaned up the buffer? 
*/ - if (!buffer_jbd(bh) || bh2jh(bh) != jh + if (!buffer_jbd(bh) || jh->b_transaction != commit_transaction || jh->b_jlist != BJ_SyncData) { jbd_unlock_bh_state(bh); @@ -478,9 +478,7 @@ void journal_commit_transaction(journal_t *journal) spin_lock(&journal->j_list_lock); continue; } - if (buffer_jbd(bh) && bh2jh(bh) == jh && - jh->b_transaction == commit_transaction && - jh->b_jlist == BJ_Locked) { + if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) { __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); journal_remove_journal_head(bh); diff --git a/trunk/fs/proc/loadavg.c b/trunk/fs/proc/loadavg.c index 1afa4dd4cae2..9bca39cf99ee 100644 --- a/trunk/fs/proc/loadavg.c +++ b/trunk/fs/proc/loadavg.c @@ -12,14 +12,20 @@ static int loadavg_proc_show(struct seq_file *m, void *v) { - unsigned long avnrun[3]; + int a, b, c; + unsigned long seq; - get_avenrun(avnrun, FIXED_1/200, 0); + do { + seq = read_seqbegin(&xtime_lock); + a = avenrun[0] + (FIXED_1/200); + b = avenrun[1] + (FIXED_1/200); + c = avenrun[2] + (FIXED_1/200); + } while (read_seqretry(&xtime_lock, seq)); - seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n", - LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), - LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), - LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), + seq_printf(m, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n", + LOAD_INT(a), LOAD_FRAC(a), + LOAD_INT(b), LOAD_FRAC(b), + LOAD_INT(c), LOAD_FRAC(c), nr_running(), nr_threads, task_active_pid_ns(current)->last_pid); return 0; diff --git a/trunk/fs/xfs/linux-2.6/kmem.h b/trunk/fs/xfs/linux-2.6/kmem.h index 179cbd630f69..af6843c7ee4b 100644 --- a/trunk/fs/xfs/linux-2.6/kmem.h +++ b/trunk/fs/xfs/linux-2.6/kmem.h @@ -103,7 +103,7 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); static inline int kmem_shake_allow(gfp_t gfp_mask) { - return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); + return (gfp_mask & __GFP_WAIT) != 0; } #endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/trunk/fs/xfs/xfs_dfrag.c b/trunk/fs/xfs/xfs_dfrag.c index 7465f9ee125f..e6d839bddbf0 100644 --- a/trunk/fs/xfs/xfs_dfrag.c +++ b/trunk/fs/xfs/xfs_dfrag.c @@ -347,14 +347,12 @@ xfs_swap_extents( error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT); -out: - kmem_free(tempifp); - return error; - out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - goto out; +out: + kmem_free(tempifp); + return error; out_trans_cancel: xfs_trans_cancel(tp, 0); diff --git a/trunk/fs/xfs/xfs_fsops.c b/trunk/fs/xfs/xfs_fsops.c index cbd451bb4848..8379e3bca26c 100644 --- a/trunk/fs/xfs/xfs_fsops.c +++ b/trunk/fs/xfs/xfs_fsops.c @@ -160,7 +160,7 @@ xfs_growfs_data_private( nagcount = new + (nb_mod != 0); if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) { nagcount--; - nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; + nb = nagcount * mp->m_sb.sb_agblocks; if (nb < mp->m_sb.sb_dblocks) return XFS_ERROR(EINVAL); } diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h index 7300fb866767..3c1924c010e8 100644 --- a/trunk/include/drm/drm_crtc.h +++ b/trunk/include/drm/drm_crtc.h @@ -471,9 +471,6 @@ struct drm_connector { u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; - /* requested DPMS state */ - int dpms; - void *helper_private; uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; diff --git a/trunk/include/drm/drm_crtc_helper.h b/trunk/include/drm/drm_crtc_helper.h index 6769ff6c1bc0..ec073d8288d9 100644 --- a/trunk/include/drm/drm_crtc_helper.h +++ 
b/trunk/include/drm/drm_crtc_helper.h @@ -99,8 +99,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_framebuffer *old_fb); extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); -extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); - extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd); diff --git a/trunk/include/linux/acpi.h b/trunk/include/linux/acpi.h index 51b4b0a5ce8c..88be890ee3c7 100644 --- a/trunk/include/linux/acpi.h +++ b/trunk/include/linux/acpi.h @@ -119,7 +119,7 @@ extern int pci_mmcfg_config_num; extern int sbf_port; extern unsigned long acpi_realmode_flags; -int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); +int acpi_register_gsi (u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); #ifdef CONFIG_X86_IO_APIC diff --git a/trunk/include/linux/amba/serial.h b/trunk/include/linux/amba/serial.h index 64a982ea5d5f..48ee32a18ac5 100644 --- a/trunk/include/linux/amba/serial.h +++ b/trunk/include/linux/amba/serial.h @@ -159,7 +159,6 @@ #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) #ifndef __ASSEMBLY__ -struct amba_device; /* in uncompress this is included but amba/bus.h is not */ struct amba_pl010_data { void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); }; diff --git a/trunk/include/linux/cpumask.h b/trunk/include/linux/cpumask.h index c5ac87ca7bc6..9f315382610b 100644 --- a/trunk/include/linux/cpumask.h +++ b/trunk/include/linux/cpumask.h @@ -1022,8 +1022,6 @@ typedef struct cpumask *cpumask_var_t; bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); -bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); -bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); @@ -1042,19 +1040,6 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, return true; } -static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) -{ - cpumask_clear(*mask); - return true; -} - -static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, - int node) -{ - cpumask_clear(*mask); - return true; -} - static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } diff --git a/trunk/include/linux/dmar.h b/trunk/include/linux/dmar.h index 10ff5c498824..e397dc342cda 100644 --- a/trunk/include/linux/dmar.h +++ b/trunk/include/linux/dmar.h @@ -108,7 +108,6 @@ struct irte { }; #ifdef CONFIG_INTR_REMAP extern int intr_remapping_enabled; -extern int intr_remapping_supported(void); extern int enable_intr_remapping(int); extern void disable_intr_remapping(void); extern int reenable_intr_remapping(int); @@ -158,8 +157,6 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic) } #define irq_remapped(irq) (0) #define enable_intr_remapping(mode) (-1) -#define disable_intr_remapping() (0) -#define reenable_intr_remapping(mode) (0) #define intr_remapping_enabled (0) #endif diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index ff374ceface0..91bb76f44f14 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -566,6 +566,6 @@ struct irq_desc; extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int 
arch_early_irq_init(void); -extern int arch_init_chip_data(struct irq_desc *desc, int node); +extern int arch_init_chip_data(struct irq_desc *desc, int cpu); #endif diff --git a/trunk/include/linux/irq.h b/trunk/include/linux/irq.h index eedbb8e5e0cc..b7cbeed972e4 100644 --- a/trunk/include/linux/irq.h +++ b/trunk/include/linux/irq.h @@ -117,7 +117,7 @@ struct irq_chip { void (*eoi)(unsigned int irq); void (*end)(unsigned int irq); - int (*set_affinity)(unsigned int irq, + void (*set_affinity)(unsigned int irq, const struct cpumask *dest); int (*retrigger)(unsigned int irq); int (*set_type)(unsigned int irq, unsigned int flow_type); @@ -187,7 +187,7 @@ struct irq_desc { spinlock_t lock; #ifdef CONFIG_SMP cpumask_var_t affinity; - unsigned int node; + unsigned int cpu; #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_var_t pending_mask; #endif @@ -201,23 +201,26 @@ struct irq_desc { } ____cacheline_internodealigned_in_smp; extern void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int node); + struct irq_desc *desc, int cpu); extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); #ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; -#endif +#else /* CONFIG_SPARSE_IRQ */ +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); +#endif /* CONFIG_SPARSE_IRQ */ -#ifdef CONFIG_NUMA_IRQ_DESC -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); -#else -static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); + +static inline struct irq_desc * +irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) { +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + return irq_to_desc(irq); +#else return desc; -} #endif - -extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); +} /* * Migration helpers for obsolete names, they will go away: @@ -383,7 +386,7 @@ extern void set_irq_noprobe(unsigned int irq); extern void set_irq_probe(unsigned int irq); /* Handle dynamic irq creation and destruction */ -extern unsigned int create_irq_nr(unsigned int irq_want, int node); +extern unsigned int create_irq_nr(unsigned int irq_want); extern int create_irq(void); extern void destroy_irq(unsigned int irq); @@ -421,46 +424,45 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #ifdef CONFIG_SMP /** - * alloc_desc_masks - allocate cpumasks for irq_desc + * init_alloc_desc_masks - allocate cpumasks for irq_desc * @desc: pointer to irq_desc struct * @cpu: cpu which will be handling the cpumasks * @boot: true if need bootmem * * Allocates affinity and pending_mask cpumask if required. * Returns true if successful (or not required). + * Side effect: affinity has all bits set, pending_mask has all bits clear. 
*/ -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, +static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, bool boot) { -#ifdef CONFIG_CPUMASK_OFFSTACK + int node; + if (boot) { alloc_bootmem_cpumask_var(&desc->affinity); + cpumask_setall(desc->affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ alloc_bootmem_cpumask_var(&desc->pending_mask); + cpumask_clear(desc->pending_mask); #endif return true; } + node = cpu_to_node(cpu); + if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) return false; + cpumask_setall(desc->affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { free_cpumask_var(desc->affinity); return false; } -#endif -#endif - return true; -} - -static inline void init_desc_masks(struct irq_desc *desc) -{ - cpumask_setall(desc->affinity); -#ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif + return true; } /** @@ -476,7 +478,7 @@ static inline void init_desc_masks(struct irq_desc *desc) static inline void init_copy_desc_masks(struct irq_desc *old_desc, struct irq_desc *new_desc) { -#ifdef CONFIG_CPUMASK_OFFSTACK +#ifdef CONFIG_CPUMASKS_OFFSTACK cpumask_copy(new_desc->affinity, old_desc->affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ @@ -497,16 +499,12 @@ static inline void free_desc_masks(struct irq_desc *old_desc, #else /* !CONFIG_SMP */ -static inline bool alloc_desc_masks(struct irq_desc *desc, int node, +static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, bool boot) { return true; } -static inline void init_desc_masks(struct irq_desc *desc) -{ -} - static inline void init_copy_desc_masks(struct irq_desc *old_desc, struct irq_desc *new_desc) { diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h index 511b09867096..bff1f0d475c7 100644 --- a/trunk/include/linux/mm.h +++ b/trunk/include/linux/mm.h @@ -1031,6 +1031,8 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void remove_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); +extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn); extern void remove_all_active_ranges(void); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); diff --git a/trunk/include/linux/parport.h b/trunk/include/linux/parport.h index 38a423ed3c01..e1f83c5065c5 100644 --- a/trunk/include/linux/parport.h +++ b/trunk/include/linux/parport.h @@ -324,10 +324,6 @@ struct parport { int spintime; atomic_t ref_count; - unsigned long devflags; -#define PARPORT_DEVPROC_REGISTERED 0 - struct pardevice *proc_device; /* Currently register proc device */ - struct list_head full_list; struct parport *slaves[3]; }; diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index dbb1043e8656..b4c38bc8049c 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -116,7 +116,6 @@ struct fs_struct; * 11 bit fractions. 
*/ extern unsigned long avenrun[]; /* Load averages */ -extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<pending.signal, SIGSTOP); set_tsk_thread_flag(child, TIF_SIGPENDING); diff --git a/trunk/include/linux/wait.h b/trunk/include/linux/wait.h index 6788e1a4d4ca..bc024632f365 100644 --- a/trunk/include/linux/wait.h +++ b/trunk/include/linux/wait.h @@ -132,6 +132,8 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, list_del(&old->task_list); } +void __wake_up_common(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, int sync, void *key); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, diff --git a/trunk/ipc/shm.c b/trunk/ipc/shm.c index 425971600485..faa46da99ebe 100644 --- a/trunk/ipc/shm.c +++ b/trunk/ipc/shm.c @@ -969,13 +969,10 @@ SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *next; unsigned long addr = (unsigned long)shmaddr; - int retval = -EINVAL; -#ifdef CONFIG_MMU loff_t size = 0; - struct vm_area_struct *next; -#endif + int retval = -EINVAL; if (addr & ~PAGE_MASK) return retval; diff --git a/trunk/kernel/async.c b/trunk/kernel/async.c index 27235f5de198..50540301ed0f 100644 --- a/trunk/kernel/async.c +++ b/trunk/kernel/async.c @@ -92,18 +92,23 @@ extern int initcall_debug; static async_cookie_t __lowest_in_progress(struct list_head *running) { struct async_entry *entry; + async_cookie_t ret = next_cookie; /* begin with "infinity" value */ if (!list_empty(running)) { entry = list_first_entry(running, struct async_entry, list); - return entry->cookie; + ret = entry->cookie; } - list_for_each_entry(entry, &async_pending, list) - if (entry->running == running) - return entry->cookie; + if (!list_empty(&async_pending)) { + list_for_each_entry(entry, &async_pending, list) + if (entry->running == running) { + ret = entry->cookie; + break; + } + } - return next_cookie; /* "infinity" value */ + return ret; } static async_cookie_t lowest_in_progress(struct list_head *running) diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 875ffbdd96d0..b9e2edd00726 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1409,7 +1409,7 @@ long do_fork(unsigned long clone_flags, } audit_finish_fork(p); - tracehook_report_clone(regs, clone_flags, nr, p); + tracehook_report_clone(trace, regs, clone_flags, nr, p); /* * We set PF_STARTING at creation in case tracing wants to diff --git a/trunk/kernel/irq/Makefile b/trunk/kernel/irq/Makefile index 7d047808419d..3394f8f52964 100644 --- a/trunk/kernel/irq/Makefile +++ b/trunk/kernel/irq/Makefile @@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o -obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o +obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o obj-$(CONFIG_PM_SLEEP) += pm.o diff --git a/trunk/kernel/irq/chip.c b/trunk/kernel/irq/chip.c index 13c68e71b726..c687ba4363f2 100644 --- a/trunk/kernel/irq/chip.c +++ b/trunk/kernel/irq/chip.c @@ -359,6 +359,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) 
spin_lock(&desc->lock); mask_ack_irq(desc, irq); + desc = irq_remap_to_desc(irq, desc); if (unlikely(desc->status & IRQ_INPROGRESS)) goto out_unlock; @@ -437,6 +438,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) desc->status &= ~IRQ_INPROGRESS; out: desc->chip->eoi(irq); + desc = irq_remap_to_desc(irq, desc); spin_unlock(&desc->lock); } @@ -473,6 +475,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) !desc->action)) { desc->status |= (IRQ_PENDING | IRQ_MASKED); mask_ack_irq(desc, irq); + desc = irq_remap_to_desc(irq, desc); goto out_unlock; } kstat_incr_irqs_this_cpu(irq, desc); @@ -480,6 +483,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) /* Start handling the irq */ if (desc->chip->ack) desc->chip->ack(irq); + desc = irq_remap_to_desc(irq, desc); /* Mark the IRQ currently in progress.*/ desc->status |= IRQ_INPROGRESS; @@ -540,8 +544,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) if (!noirqdebug) note_interrupt(irq, desc, action_ret); - if (desc->chip->eoi) + if (desc->chip->eoi) { desc->chip->eoi(irq); + desc = irq_remap_to_desc(irq, desc); + } } void @@ -576,8 +582,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, /* Uninstall? */ if (handle == handle_bad_irq) { - if (desc->chip != &no_irq_chip) + if (desc->chip != &no_irq_chip) { mask_ack_irq(desc, irq); + desc = irq_remap_to_desc(irq, desc); + } desc->status |= IRQ_DISABLED; desc->depth = 1; } diff --git a/trunk/kernel/irq/handle.c b/trunk/kernel/irq/handle.c index 18041a254d32..26e08754744f 100644 --- a/trunk/kernel/irq/handle.c +++ b/trunk/kernel/irq/handle.c @@ -11,7 +11,6 @@ */ #include -#include #include #include #include @@ -82,48 +81,45 @@ static struct irq_desc irq_desc_init = { .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), }; -void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) +void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) { + int node; void *ptr; - if (slab_is_available()) - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), - GFP_ATOMIC, node); - else - ptr = alloc_bootmem_node(NODE_DATA(node), - nr * sizeof(*desc->kstat_irqs)); + node = cpu_to_node(cpu); + ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node); /* * don't overwite if can not get new one * init_copy_kstat_irqs() could still use old one */ if (ptr) { - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); + printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", + cpu, node); desc->kstat_irqs = ptr; } } -static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) +static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) { memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); spin_lock_init(&desc->lock); desc->irq = irq; #ifdef CONFIG_SMP - desc->node = node; + desc->cpu = cpu; #endif lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_kstat_irqs(desc, node, nr_cpu_ids); + init_kstat_irqs(desc, cpu, nr_cpu_ids); if (!desc->kstat_irqs) { printk(KERN_ERR "can not alloc kstat_irqs\n"); BUG_ON(1); } - if (!alloc_desc_masks(desc, node, false)) { + if (!init_alloc_desc_masks(desc, cpu, false)) { printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); BUG_ON(1); } - init_desc_masks(desc); - arch_init_chip_data(desc, node); + arch_init_chip_data(desc, cpu); } /* @@ -173,8 +169,7 @@ int __init early_irq_init(void) desc[i].irq = i; desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - alloc_desc_masks(&desc[i], 0, true); - 
init_desc_masks(&desc[i]); + init_alloc_desc_masks(&desc[i], 0, true); irq_desc_ptrs[i] = desc + i; } @@ -192,10 +187,11 @@ struct irq_desc *irq_to_desc(unsigned int irq) return NULL; } -struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) +struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) { struct irq_desc *desc; unsigned long flags; + int node; if (irq >= nr_irqs) { WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", @@ -214,17 +210,15 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) if (desc) goto out_unlock; - if (slab_is_available()) - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); - else - desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc)); - - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); + node = cpu_to_node(cpu); + desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); + printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", + irq, cpu, node); if (!desc) { printk(KERN_ERR "can not alloc irq_desc\n"); BUG_ON(1); } - init_one_irq_desc(irq, desc, node); + init_one_irq_desc(irq, desc, cpu); irq_desc_ptrs[irq] = desc; @@ -262,8 +256,7 @@ int __init early_irq_init(void) for (i = 0; i < count; i++) { desc[i].irq = i; - alloc_desc_masks(&desc[i], 0, true); - init_desc_masks(&desc[i]); + init_alloc_desc_masks(&desc[i], 0, true); desc[i].kstat_irqs = kstat_irqs_all[i]; } return arch_early_irq_init(); @@ -274,7 +267,7 @@ struct irq_desc *irq_to_desc(unsigned int irq) return (irq < NR_IRQS) ? irq_desc + irq : NULL; } -struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) +struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) { return irq_to_desc(irq); } @@ -460,8 +453,11 @@ unsigned int __do_IRQ(unsigned int irq) /* * No locking required for CPU-local interrupts: */ - if (desc->chip->ack) + if (desc->chip->ack) { desc->chip->ack(irq); + /* get new one */ + desc = irq_remap_to_desc(irq, desc); + } if (likely(!(desc->status & IRQ_DISABLED))) { action_ret = handle_IRQ_event(irq, desc->action); if (!noirqdebug) @@ -472,8 +468,10 @@ unsigned int __do_IRQ(unsigned int irq) } spin_lock(&desc->lock); - if (desc->chip->ack) + if (desc->chip->ack) { desc->chip->ack(irq); + desc = irq_remap_to_desc(irq, desc); + } /* * REPLAY is when Linux resends an IRQ that was dropped earlier * WAITING is used by probe to mark irqs that are being tested diff --git a/trunk/kernel/irq/internals.h b/trunk/kernel/irq/internals.h index 73468253143b..01ce20eab38f 100644 --- a/trunk/kernel/irq/internals.h +++ b/trunk/kernel/irq/internals.h @@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); extern struct lock_class_key irq_desc_lock_class; -extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); +extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); extern void clear_kstat_irqs(struct irq_desc *desc); extern spinlock_t sparse_irq_lock; @@ -42,9 +42,6 @@ static inline void unregister_handler_proc(unsigned int irq, extern int irq_select_affinity_usr(unsigned int irq); -extern void -irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask); - /* * Debugging printout: */ diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c index aaf5c9d05770..2734eca59243 100644 --- a/trunk/kernel/irq/manage.c +++ b/trunk/kernel/irq/manage.c @@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq) return 1; } 
-void +static void irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) { struct irqaction *action = desc->action; @@ -109,22 +109,17 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) spin_lock_irqsave(&desc->lock, flags); #ifdef CONFIG_GENERIC_PENDING_IRQ - if (desc->status & IRQ_MOVE_PCNTXT) { - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); - irq_set_thread_affinity(desc, cpumask); - } - } + if (desc->status & IRQ_MOVE_PCNTXT) + desc->chip->set_affinity(irq, cpumask); else { desc->status |= IRQ_MOVE_PENDING; cpumask_copy(desc->pending_mask, cpumask); } #else - if (!desc->chip->set_affinity(irq, cpumask)) { - cpumask_copy(desc->affinity, cpumask); - irq_set_thread_affinity(desc, cpumask); - } + cpumask_copy(desc->affinity, cpumask); + desc->chip->set_affinity(irq, cpumask); #endif + irq_set_thread_affinity(desc, cpumask); desc->status |= IRQ_AFFINITY_SET; spin_unlock_irqrestore(&desc->lock, flags); return 0; diff --git a/trunk/kernel/irq/migration.c b/trunk/kernel/irq/migration.c index cfe767ca1545..e05ad9be43b7 100644 --- a/trunk/kernel/irq/migration.c +++ b/trunk/kernel/irq/migration.c @@ -1,8 +1,5 @@ #include -#include - -#include "internals.h" void move_masked_irq(int irq) { @@ -42,12 +39,11 @@ void move_masked_irq(int irq) * masking the irqs. */ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) - < nr_cpu_ids)) - if (!desc->chip->set_affinity(irq, desc->pending_mask)) { - cpumask_copy(desc->affinity, desc->pending_mask); - irq_set_thread_affinity(desc, desc->pending_mask); - } - + < nr_cpu_ids)) { + cpumask_and(desc->affinity, + desc->pending_mask, cpu_online_mask); + desc->chip->set_affinity(irq, desc->affinity); + } cpumask_clear(desc->pending_mask); } diff --git a/trunk/kernel/irq/numa_migrate.c b/trunk/kernel/irq/numa_migrate.c index 2f69bee57bf2..44bbdcbaf8d2 100644 --- a/trunk/kernel/irq/numa_migrate.c +++ b/trunk/kernel/irq/numa_migrate.c @@ -15,9 +15,9 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc, - int node, int nr) + int cpu, int nr) { - init_kstat_irqs(desc, node, nr); + init_kstat_irqs(desc, cpu, nr); if (desc->kstat_irqs != old_desc->kstat_irqs) memcpy(desc->kstat_irqs, old_desc->kstat_irqs, @@ -34,20 +34,20 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) } static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, - struct irq_desc *desc, int node) + struct irq_desc *desc, int cpu) { memcpy(desc, old_desc, sizeof(struct irq_desc)); - if (!alloc_desc_masks(desc, node, false)) { + if (!init_alloc_desc_masks(desc, cpu, false)) { printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " "for migration.\n", irq); return false; } spin_lock_init(&desc->lock); - desc->node = node; + desc->cpu = cpu; lockdep_set_class(&desc->lock, &irq_desc_lock_class); - init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); + init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); init_copy_desc_masks(old_desc, desc); - arch_init_copy_chip_data(old_desc, desc, node); + arch_init_copy_chip_data(old_desc, desc, cpu); return true; } @@ -59,11 +59,12 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) } static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, - int node) + int cpu) { struct irq_desc *desc; unsigned int irq; unsigned long flags; + int node; irq = old_desc->irq; @@ -75,6 +76,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, 
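The reworked irq_set_affinity() above takes one of two paths: when the descriptor carries IRQ_MOVE_PCNTXT the chip's set_affinity() is called immediately, otherwise the request is parked in pending_mask under IRQ_MOVE_PENDING and applied later by move_masked_irq(). A compact sketch of that branch structure follows; the flag values and _stub types are made up for illustration and do not match the kernel's definitions.

	#include <stdio.h>

	#define IRQ_MOVE_PCNTXT   0x1   /* affinity may change in process context */
	#define IRQ_MOVE_PENDING  0x2   /* change deferred to the next interrupt */

	struct irq_desc_stub {
		unsigned int status;
		unsigned long affinity;      /* tiny stand-in for a cpumask */
		unsigned long pending_mask;
	};

	/* apply the mask now when the chip allows it, otherwise park it in
	 * pending_mask for move_masked_irq() to pick up later */
	static void set_affinity_stub(struct irq_desc_stub *desc, unsigned long mask)
	{
		if (desc->status & IRQ_MOVE_PCNTXT) {
			desc->affinity = mask;       /* chip->set_affinity() analogue */
		} else {
			desc->status |= IRQ_MOVE_PENDING;
			desc->pending_mask = mask;
		}
	}

	int main(void)
	{
		struct irq_desc_stub immediate = { .status = IRQ_MOVE_PCNTXT };
		struct irq_desc_stub deferred  = { .status = 0 };

		set_affinity_stub(&immediate, 0x3);
		set_affinity_stub(&deferred, 0xc);
		printf("immediate affinity %#lx, deferred pending %#lx\n",
		       immediate.affinity, deferred.pending_mask);
		return 0;
	}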
if (desc && old_desc != desc) goto out_unlock; + node = cpu_to_node(cpu); desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); if (!desc) { printk(KERN_ERR "irq %d: can not get new irq_desc " @@ -83,7 +85,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, desc = old_desc; goto out_unlock; } - if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { + if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) { /* still use old one */ kfree(desc); desc = old_desc; @@ -95,7 +97,9 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, /* free the old one */ free_one_irq_desc(old_desc, desc); + spin_unlock(&old_desc->lock); kfree(old_desc); + spin_lock(&desc->lock); return desc; @@ -105,14 +109,24 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, return desc; } -struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu) { + int old_cpu; + int node, old_node; + /* those all static, do move them */ if (desc->irq < NR_IRQS_LEGACY) return desc; - if (desc->node != node) - desc = __real_move_irq_desc(desc, node); + old_cpu = desc->cpu; + if (old_cpu != cpu) { + node = cpu_to_node(cpu); + old_node = cpu_to_node(old_cpu); + if (old_node != node) + desc = __real_move_irq_desc(desc, cpu); + else + desc->cpu = cpu; + } return desc; } diff --git a/trunk/kernel/mutex.c b/trunk/kernel/mutex.c index 6ca5fe96e393..507cf2b5e9f1 100644 --- a/trunk/kernel/mutex.c +++ b/trunk/kernel/mutex.c @@ -249,9 +249,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, /* didnt get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); + __schedule(); spin_lock_mutex(&lock->wait_lock, flags); } diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c index 42c317874cfa..0692ab5a0d67 100644 --- a/trunk/kernel/ptrace.c +++ b/trunk/kernel/ptrace.c @@ -304,8 +304,6 @@ int ptrace_detach(struct task_struct *child, unsigned int data) if (child->ptrace) { child->exit_code = data; dead = __ptrace_detach(current, child); - if (!child->exit_state) - wake_up_process(child); } write_unlock_irq(&tasklist_lock); diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index c3c04e256560..26efa475bdc1 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -630,10 +630,6 @@ struct rq { struct list_head migration_queue; #endif - /* calc_load related fields */ - unsigned long calc_load_update; - long calc_load_active; - #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP int hrtick_csd_pending; @@ -1732,8 +1728,6 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) } #endif -static void calc_load_account_active(struct rq *this_rq); - #include "sched_stats.h" #include "sched_idletask.c" #include "sched_fair.c" @@ -2464,17 +2458,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) return success; } -/** - * wake_up_process - Wake up a specific process - * @p: The process to be woken up. - * - * Attempt to wake up the nominated process and move it to the set of runnable - * processes. Returns 1 if the process was woken up, 0 if it was already - * running. - * - * It may be assumed that this function implies a write memory barrier before - * changing the task state if and only if any tasks are woken up. 
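move_irq_desc() above now keys on a CPU and only pays for a full descriptor reallocation when that CPU sits on a different NUMA node; a same-node change merely updates desc->cpu. A small sketch of that decision, with cpu_to_node_stub() as an invented topology helper (the real cpu_to_node() comes from architecture code):

	#include <stdio.h>

	struct irq_desc_stub {
		int irq;
		int cpu;
	};

	static int cpu_to_node_stub(int cpu)
	{
		return cpu / 4;              /* assumed topology: 4 CPUs per node */
	}

	/* cross-node move: reallocate on the new node; same node: cheap update */
	static struct irq_desc_stub *move_irq_desc_stub(struct irq_desc_stub *desc,
							int cpu)
	{
		int old_cpu = desc->cpu;

		if (old_cpu != cpu) {
			if (cpu_to_node_stub(old_cpu) != cpu_to_node_stub(cpu))
				printf("irq %d: realloc descriptor on new node\n",
				       desc->irq);
			else
				desc->cpu = cpu;
		}
		return desc;
	}

	int main(void)
	{
		struct irq_desc_stub d = { .irq = 9, .cpu = 1 };

		move_irq_desc_stub(&d, 2);   /* same node: just retarget */
		move_irq_desc_stub(&d, 6);   /* different node: would reallocate */
		return 0;
	}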
- */ int wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_ALL, 0); @@ -2873,72 +2856,19 @@ unsigned long nr_iowait(void) return sum; } -/* Variables and functions for calc_load */ -static atomic_long_t calc_load_tasks; -static unsigned long calc_load_update; -unsigned long avenrun[3]; -EXPORT_SYMBOL(avenrun); - -/** - * get_avenrun - get the load average array - * @loads: pointer to dest load array - * @offset: offset to add - * @shift: shift count to shift the result left - * - * These values are estimates at best, so no need for locking. - */ -void get_avenrun(unsigned long *loads, unsigned long offset, int shift) -{ - loads[0] = (avenrun[0] + offset) << shift; - loads[1] = (avenrun[1] + offset) << shift; - loads[2] = (avenrun[2] + offset) << shift; -} - -static unsigned long -calc_load(unsigned long load, unsigned long exp, unsigned long active) +unsigned long nr_active(void) { - load *= exp; - load += active * (FIXED_1 - exp); - return load >> FSHIFT; -} + unsigned long i, running = 0, uninterruptible = 0; -/* - * calc_load - update the avenrun load estimates 10 ticks after the - * CPUs have updated calc_load_tasks. - */ -void calc_global_load(void) -{ - unsigned long upd = calc_load_update + 10; - long active; - - if (time_before(jiffies, upd)) - return; - - active = atomic_long_read(&calc_load_tasks); - active = active > 0 ? active * FIXED_1 : 0; - - avenrun[0] = calc_load(avenrun[0], EXP_1, active); - avenrun[1] = calc_load(avenrun[1], EXP_5, active); - avenrun[2] = calc_load(avenrun[2], EXP_15, active); - - calc_load_update += LOAD_FREQ; -} - -/* - * Either called from update_cpu_load() or from a cpu going idle - */ -static void calc_load_account_active(struct rq *this_rq) -{ - long nr_active, delta; + for_each_online_cpu(i) { + running += cpu_rq(i)->nr_running; + uninterruptible += cpu_rq(i)->nr_uninterruptible; + } - nr_active = this_rq->nr_running; - nr_active += (long) this_rq->nr_uninterruptible; + if (unlikely((long)uninterruptible < 0)) + uninterruptible = 0; - if (nr_active != this_rq->calc_load_active) { - delta = nr_active - this_rq->calc_load_active; - this_rq->calc_load_active = nr_active; - atomic_long_add(delta, &calc_load_tasks); - } + return running + uninterruptible; } /* @@ -2969,11 +2899,6 @@ static void update_cpu_load(struct rq *this_rq) new_load += scale-1; this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; } - - if (time_after_eq(jiffies, this_rq->calc_load_update)) { - this_rq->calc_load_update += LOAD_FREQ; - calc_load_account_active(this_rq); - } } #ifdef CONFIG_SMP @@ -4315,126 +4240,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) static struct { atomic_t load_balancer; cpumask_var_t cpu_mask; - cpumask_var_t ilb_grp_nohz_mask; } nohz ____cacheline_aligned = { .load_balancer = ATOMIC_INIT(-1), }; -#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) -/** - * lowest_flag_domain - Return lowest sched_domain containing flag. - * @cpu: The cpu whose lowest level of sched domain is to - * be returned. - * @flag: The flag to check for the lowest sched_domain - * for the given cpu. - * - * Returns the lowest sched_domain of a cpu which contains the given flag. - */ -static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) -{ - struct sched_domain *sd; - - for_each_domain(cpu, sd) - if (sd && (sd->flags & flag)) - break; - - return sd; -} - -/** - * for_each_flag_domain - Iterates over sched_domains containing the flag. - * @cpu: The cpu whose domains we're iterating over. 
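The reinstated nr_active() above sums nr_running and nr_uninterruptible over all online runqueues; because a task can be marked uninterruptible on one CPU and woken on another, the summed uninterruptible count can be transiently negative and is clamped at zero. A user-space sketch of that accounting, with struct rq_stub as an invented stand-in for the runqueue:

	#include <stdio.h>

	struct rq_stub {
		unsigned long nr_running;
		long nr_uninterruptible;     /* can dip below zero per CPU */
	};

	static unsigned long nr_active_stub(const struct rq_stub *rqs, int nr_cpus)
	{
		unsigned long running = 0;
		long uninterruptible = 0;
		int i;

		for (i = 0; i < nr_cpus; i++) {
			running += rqs[i].nr_running;
			uninterruptible += rqs[i].nr_uninterruptible;
		}
		if (uninterruptible < 0)     /* skewed per-CPU counters */
			uninterruptible = 0;

		return running + (unsigned long)uninterruptible;
	}

	int main(void)
	{
		struct rq_stub rqs[2] = { { 3, -1 }, { 2, 2 } };

		printf("nr_active = %lu\n", nr_active_stub(rqs, 2));
		return 0;
	}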
- * @sd: variable holding the value of the power_savings_sd - * for cpu. - * @flag: The flag to filter the sched_domains to be iterated. - * - * Iterates over all the scheduler domains for a given cpu that has the 'flag' - * set, starting from the lowest sched_domain to the highest. - */ -#define for_each_flag_domain(cpu, sd, flag) \ - for (sd = lowest_flag_domain(cpu, flag); \ - (sd && (sd->flags & flag)); sd = sd->parent) - -/** - * is_semi_idle_group - Checks if the given sched_group is semi-idle. - * @ilb_group: group to be checked for semi-idleness - * - * Returns: 1 if the group is semi-idle. 0 otherwise. - * - * We define a sched_group to be semi idle if it has atleast one idle-CPU - * and atleast one non-idle CPU. This helper function checks if the given - * sched_group is semi-idle or not. - */ -static inline int is_semi_idle_group(struct sched_group *ilb_group) -{ - cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask, - sched_group_cpus(ilb_group)); - - /* - * A sched_group is semi-idle when it has atleast one busy cpu - * and atleast one idle cpu. - */ - if (cpumask_empty(nohz.ilb_grp_nohz_mask)) - return 0; - - if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group))) - return 0; - - return 1; -} -/** - * find_new_ilb - Finds the optimum idle load balancer for nomination. - * @cpu: The cpu which is nominating a new idle_load_balancer. - * - * Returns: Returns the id of the idle load balancer if it exists, - * Else, returns >= nr_cpu_ids. - * - * This algorithm picks the idle load balancer such that it belongs to a - * semi-idle powersavings sched_domain. The idea is to try and avoid - * completely idle packages/cores just for the purpose of idle load balancing - * when there are other idle cpu's which are better suited for that job. - */ -static int find_new_ilb(int cpu) -{ - struct sched_domain *sd; - struct sched_group *ilb_group; - - /* - * Have idle load balancer selection from semi-idle packages only - * when power-aware load balancing is enabled - */ - if (!(sched_smt_power_savings || sched_mc_power_savings)) - goto out_done; - - /* - * Optimize for the case when we have no idle CPUs or only one - * idle CPU. Don't walk the sched_domain hierarchy in such cases - */ - if (cpumask_weight(nohz.cpu_mask) < 2) - goto out_done; - - for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { - ilb_group = sd->groups; - - do { - if (is_semi_idle_group(ilb_group)) - return cpumask_first(nohz.ilb_grp_nohz_mask); - - ilb_group = ilb_group->next; - - } while (ilb_group != sd->groups); - } - -out_done: - return cpumask_first(nohz.cpu_mask); -} -#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ -static inline int find_new_ilb(int call_cpu) -{ - return cpumask_first(nohz.cpu_mask); -} -#endif - /* * This routine will try to nominate the ilb (idle load balancing) * owner among the cpus whose ticks are stopped. ilb owner will do the idle @@ -4489,24 +4298,8 @@ int select_nohz_load_balancer(int stop_tick) /* make me the ilb owner */ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) return 1; - } else if (atomic_read(&nohz.load_balancer) == cpu) { - int new_ilb; - - if (!(sched_smt_power_savings || - sched_mc_power_savings)) - return 1; - /* - * Check to see if there is a more power-efficient - * ilb. 
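The is_semi_idle_group()/find_new_ilb() block being removed in this hunk tried to pick the idle-load-balancer CPU from a package that was only partly idle, so that fully idle packages could stay asleep. The sketch below reproduces only the semi-idle test the removed code applied, using plain unsigned long bitmasks as stand-ins for cpumask_and()/cpumask_equal():

	#include <stdio.h>

	static int is_semi_idle_group_stub(unsigned long group_cpus,
					   unsigned long nohz_cpus)
	{
		unsigned long idle_in_group = group_cpus & nohz_cpus;

		/* semi-idle: at least one idle CPU and at least one busy CPU */
		if (idle_in_group == 0)
			return 0;            /* fully busy */
		if (idle_in_group == group_cpus)
			return 0;            /* fully idle */
		return 1;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       is_semi_idle_group_stub(0xf, 0x0),   /* busy     -> 0 */
		       is_semi_idle_group_stub(0xf, 0xf),   /* all idle -> 0 */
		       is_semi_idle_group_stub(0xf, 0x3));  /* mixed    -> 1 */
		return 0;
	}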
- */ - new_ilb = find_new_ilb(cpu); - if (new_ilb < nr_cpu_ids && new_ilb != cpu) { - atomic_set(&nohz.load_balancer, -1); - resched_cpu(new_ilb); - return 0; - } + } else if (atomic_read(&nohz.load_balancer) == cpu) return 1; - } } else { if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) return 0; @@ -4675,7 +4468,15 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) } if (atomic_read(&nohz.load_balancer) == -1) { - int ilb = find_new_ilb(cpu); + /* + * simple selection for now: Nominate the + * first cpu in the nohz list to be the next + * ilb owner. + * + * TBD: Traverse the sched domains and nominate + * the nearest cpu in the nohz.cpu_mask. + */ + int ilb = cpumask_first(nohz.cpu_mask); if (ilb < nr_cpu_ids) resched_cpu(ilb); @@ -5206,15 +5007,13 @@ pick_next_task(struct rq *rq) /* * schedule() is the main scheduler function. */ -asmlinkage void __sched schedule(void) +asmlinkage void __sched __schedule(void) { struct task_struct *prev, *next; unsigned long *switch_count; struct rq *rq; int cpu; -need_resched: - preempt_disable(); cpu = smp_processor_id(); rq = cpu_rq(cpu); rcu_qsctr_inc(cpu); @@ -5271,9 +5070,15 @@ asmlinkage void __sched schedule(void) if (unlikely(reacquire_kernel_lock(current) < 0)) goto need_resched_nonpreemptible; +} +asmlinkage void __sched schedule(void) +{ +need_resched: + preempt_disable(); + __schedule(); preempt_enable_no_resched(); - if (need_resched()) + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; } EXPORT_SYMBOL(schedule); @@ -5416,7 +5221,7 @@ EXPORT_SYMBOL(default_wake_function); * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ -static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, +void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync, void *key) { wait_queue_t *curr, *next; @@ -5436,9 +5241,6 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, * @mode: which threads * @nr_exclusive: how many wake-one or wake-many threads to wake up * @key: is directly passed to the wakeup function - * - * It may be assumed that this function implies a write memory barrier before - * changing the task state if and only if any tasks are woken up. */ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) @@ -5477,9 +5279,6 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) * with each other. This can prevent needless bouncing between CPUs. * * On UP it can prevent extra preemption. - * - * It may be assumed that this function implies a write memory barrier before - * changing the task state if and only if any tasks are woken up. */ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key) @@ -5516,9 +5315,6 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ * awakened in the same order in which they were queued. * * See also complete_all(), wait_for_completion() and related routines. - * - * It may be assumed that this function implies a write memory barrier before - * changing the task state if and only if any tasks are woken up. */ void complete(struct completion *x) { @@ -5536,9 +5332,6 @@ EXPORT_SYMBOL(complete); * @x: holds the state of this particular completion * * This will wake up all threads waiting on this particular completion event. 
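The scheduler hunk above folds the core of schedule() into a new __schedule() (also called directly from the mutex slowpath) and leaves schedule() as a thin wrapper that disables preemption around the switch and repeats it while TIF_NEED_RESCHED is still set. A rough user-space sketch of that wrapper shape, with every name suffixed _stub because it only imitates the control flow:

	#include <stdio.h>
	#include <stdbool.h>

	static bool need_resched_flag;   /* stands in for TIF_NEED_RESCHED */
	static int preempt_count;

	static void preempt_disable_stub(void)           { preempt_count++; }
	static void preempt_enable_no_resched_stub(void) { preempt_count--; }

	/* core context switch; in the patch this is the new __schedule() */
	static void core_schedule_stub(void)
	{
		printf("switch tasks (preempt_count=%d)\n", preempt_count);
		need_resched_flag = false;   /* pretend the new task clears it */
	}

	/* disable preemption around the core switch and loop as long as a
	 * resched was requested in the meantime */
	static void schedule_stub(void)
	{
		do {
			preempt_disable_stub();
			core_schedule_stub();
			preempt_enable_no_resched_stub();
		} while (need_resched_flag);
	}

	int main(void)
	{
		need_resched_flag = true;
		schedule_stub();
		return 0;
	}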
- * - * It may be assumed that this function implies a write memory barrier before - * changing the task state if and only if any tasks are woken up. */ void complete_all(struct completion *x) { @@ -6697,9 +6490,8 @@ void sched_show_task(struct task_struct *p) #ifdef CONFIG_DEBUG_STACK_USAGE free = stack_not_used(p); #endif - printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, - task_pid_nr(p), task_pid_nr(p->real_parent), - (unsigned long)task_thread_info(p)->flags); + printk(KERN_CONT "%5lu %5d %6d\n", free, + task_pid_nr(p), task_pid_nr(p->real_parent)); show_stack(p, NULL); } @@ -7178,14 +6970,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu) } } - -/* - * remove the tasks which were accounted by rq from calc_load_tasks. - */ -static void calc_global_load_remove(struct rq *rq) -{ - atomic_long_sub(rq->calc_load_active, &calc_load_tasks); -} #endif /* CONFIG_HOTPLUG_CPU */ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) @@ -7420,8 +7204,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) /* Update our root-domain */ rq = cpu_rq(cpu); spin_lock_irqsave(&rq->lock, flags); - rq->calc_load_update = calc_load_update; - rq->calc_load_active = 0; if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); @@ -7461,7 +7243,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) cpuset_unlock(); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); - calc_global_load_remove(rq); + /* * No need to migrate the tasks: it was best-effort if * they didn't take sched_hotcpu_mutex. Just wake up @@ -7971,9 +7753,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0; /* * The cpus mask in sched_group and sched_domain hangs off the end. - * - * ( See the the comments in include/linux/sched.h:struct sched_group - * and struct sched_domain. ) + * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space + * for nr_cpu_ids < CONFIG_NR_CPUS. */ struct static_sched_group { struct sched_group sg; @@ -8094,7 +7875,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) struct sched_domain *sd; sd = &per_cpu(phys_domains, j).sd; - if (j != group_first_cpu(sd->groups)) { + if (j != cpumask_first(sched_group_cpus(sd->groups))) { /* * Only add "power" once for each * physical package. @@ -8172,7 +7953,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) WARN_ON(!sd || !sd->groups); - if (cpu != group_first_cpu(sd->groups)) + if (cpu != cpumask_first(sched_group_cpus(sd->groups))) return; child = sd->child; @@ -9157,8 +8938,6 @@ void __init sched_init(void) rq = cpu_rq(i); spin_lock_init(&rq->lock); rq->nr_running = 0; - rq->calc_load_active = 0; - rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED @@ -9266,9 +9045,6 @@ void __init sched_init(void) * when this runqueue becomes "idle". */ init_idle(current, smp_processor_id()); - - calc_load_update = jiffies + LOAD_FREQ; - /* * During early bootup we pretend to be a normal task: */ @@ -9279,7 +9055,6 @@ void __init sched_init(void) #ifdef CONFIG_SMP #ifdef CONFIG_NO_HZ alloc_bootmem_cpumask_var(&nohz.cpu_mask); - alloc_bootmem_cpumask_var(&nohz.ilb_grp_nohz_mask); #endif alloc_bootmem_cpumask_var(&cpu_isolated_map); #endif /* SMP */ @@ -10025,13 +9800,6 @@ static int sched_rt_global_constraints(void) if (sysctl_sched_rt_period <= 0) return -EINVAL; - /* - * There's always some RT tasks in the root group - * -- migration, kstopmachine etc.. 
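Both group_first_cpu() and the open-coded cpumask_first(sched_group_cpus(...)) it is replaced with above select the lowest-numbered CPU of a sched_group, which then acts as the single CPU that accounts the group's cpu_power. A bitmask sketch of that representative-CPU check; the _stub helper is an invented stand-in, not the kernel cpumask API:

	#include <stdio.h>

	/* invented stand-in for cpumask_first(sched_group_cpus(group)) */
	static int group_first_cpu_stub(unsigned long group_mask)
	{
		int cpu;

		for (cpu = 0; cpu < (int)(8 * sizeof(group_mask)); cpu++)
			if (group_mask & (1UL << cpu))
				return cpu;
		return -1;
	}

	int main(void)
	{
		unsigned long package = 0x0c;    /* CPUs 2 and 3 share a package */
		int cpu;

		for (cpu = 2; cpu <= 3; cpu++) {
			if (cpu != group_first_cpu_stub(package))
				continue;    /* only the first CPU accounts power */
			printf("cpu %d accounts the package's power\n", cpu);
		}
		return 0;
	}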
- */ - if (sysctl_sched_rt_runtime == 0) - return -EBUSY; - spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); for_each_possible_cpu(i) { struct rt_rq *rt_rq = &cpu_rq(i)->rt; diff --git a/trunk/kernel/sched_cpupri.c b/trunk/kernel/sched_cpupri.c index 344712a5e3ed..cdd3c89574cd 100644 --- a/trunk/kernel/sched_cpupri.c +++ b/trunk/kernel/sched_cpupri.c @@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem) vec->count = 0; if (bootmem) alloc_bootmem_cpumask_var(&vec->mask); - else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) + else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 5f9650e8fe75..3816f217f119 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -1487,10 +1487,17 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) find_matching_se(&se, &pse); - BUG_ON(!pse); + while (se) { + BUG_ON(!pse); - if (wakeup_preempt_entity(se, pse) == 1) - resched_task(curr); + if (wakeup_preempt_entity(se, pse) == 1) { + resched_task(curr); + break; + } + + se = parent_entity(se); + pse = parent_entity(pse); + } } static struct task_struct *pick_next_task_fair(struct rq *rq) diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c index 499672c10cbd..8a21a2e28c13 100644 --- a/trunk/kernel/sched_idletask.c +++ b/trunk/kernel/sched_idletask.c @@ -22,8 +22,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sy static struct task_struct *pick_next_task_idle(struct rq *rq) { schedstat_inc(rq, sched_goidle); - /* adjust the active tasks as we might go into a long sleep */ - calc_load_account_active(rq); + return rq->idle; } diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 9bf0d2a73045..f2c66f8f9712 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void) unsigned int i; for_each_possible_cpu(i) - zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), + alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i)); } #endif /* CONFIG_SMP */ diff --git a/trunk/kernel/smp.c b/trunk/kernel/smp.c index ad63d8501207..858baac568ee 100644 --- a/trunk/kernel/smp.c +++ b/trunk/kernel/smp.c @@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, + if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu))) return NOTIFY_BAD; break; diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index f674f332a024..b525dd348511 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -828,7 +828,7 @@ int __init __weak arch_early_irq_init(void) return 0; } -int __weak arch_init_chip_data(struct irq_desc *desc, int node) +int __weak arch_init_chip_data(struct irq_desc *desc, int cpu) { return 0; } diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 6a463716ecbf..b2970d56fb76 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -729,14 +729,6 @@ static struct ctl_table kern_table[] = { .mode = 0444, .proc_handler = &proc_dointvec, }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "bootloader_version", - .data = &bootloader_version, - .maxlen = sizeof (int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, { .ctl_name = CTL_UNNUMBERED, .procname = "kstack_depth_to_print", 
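The new while loop in check_preempt_wakeup() above matters for group scheduling: after find_matching_se() has lined the two entities up at a common depth, the decision at one level can be inconclusive, so the comparison is repeated up the parent chain until some level preempts. A self-contained sketch of that walk; the _stub types and the simplistic vruntime comparison are invented for illustration and are not the kernel's actual placement policy.

	#include <stdio.h>

	struct sched_entity_stub {
		long vruntime;
		struct sched_entity_stub *parent;   /* group parent, NULL at the top */
	};

	static struct sched_entity_stub *parent_entity_stub(struct sched_entity_stub *se)
	{
		return se ? se->parent : NULL;
	}

	/* crude stand-in for wakeup_preempt_entity(): preempt when the current
	 * entity has run well ahead of the one being woken */
	static int wakeup_preempt_entity_stub(struct sched_entity_stub *curr,
					      struct sched_entity_stub *woken)
	{
		return curr->vruntime > woken->vruntime;
	}

	/* walk both matched hierarchies upward and resched at the first level
	 * that preempts (the kernel keeps the chains in step, BUG_ON(!pse)) */
	static int should_resched_stub(struct sched_entity_stub *se,
				       struct sched_entity_stub *pse)
	{
		while (se && pse) {
			if (wakeup_preempt_entity_stub(se, pse))
				return 1;
			se = parent_entity_stub(se);
			pse = parent_entity_stub(pse);
		}
		return 0;
	}

	int main(void)
	{
		struct sched_entity_stub grp_curr = { 200, NULL }, grp_new = { 100, NULL };
		struct sched_entity_stub curr = { 50, &grp_curr }, woken = { 60, &grp_new };

		/* no preemption at task level, but the groups' vruntimes decide */
		printf("resched: %d\n", should_resched_stub(&curr, &woken));
		return 0;
	}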
diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index 52a8bf8931f3..687dff49f6e7 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -22,7 +22,7 @@ /* * This read-write spinlock protects us from races in SMP while - * playing with xtime. + * playing with xtime and avenrun. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index a26ed294f938..cffffad01c31 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -1122,6 +1122,47 @@ void update_process_times(int user_tick) run_posix_cpu_timers(p); } +/* + * Nr of active tasks - counted in fixed-point numbers + */ +static unsigned long count_active_tasks(void) +{ + return nr_active() * FIXED_1; +} + +/* + * Hmm.. Changed this, as the GNU make sources (load.c) seems to + * imply that avenrun[] is the standard name for this kind of thing. + * Nothing else seems to be standardized: the fractional size etc + * all seem to differ on different machines. + * + * Requires xtime_lock to access. + */ +unsigned long avenrun[3]; + +EXPORT_SYMBOL(avenrun); + +/* + * calc_load - given tick count, update the avenrun load estimates. + * This is called while holding a write_lock on xtime_lock. + */ +static inline void calc_load(unsigned long ticks) +{ + unsigned long active_tasks; /* fixed-point */ + static int count = LOAD_FREQ; + + count -= ticks; + if (unlikely(count < 0)) { + active_tasks = count_active_tasks(); + do { + CALC_LOAD(avenrun[0], EXP_1, active_tasks); + CALC_LOAD(avenrun[1], EXP_5, active_tasks); + CALC_LOAD(avenrun[2], EXP_15, active_tasks); + count += LOAD_FREQ; + } while (count < 0); + } +} + /* * This function runs timers and the timer-tq in bottom half context. */ @@ -1145,6 +1186,16 @@ void run_local_timers(void) softlockup_tick(); } +/* + * Called by the timer interrupt. xtime_lock must already be taken + * by the timer IRQ! + */ +static inline void update_times(unsigned long ticks) +{ + update_wall_time(); + calc_load(ticks); +} + /* * The 64-bit jiffies value is not atomic - you MUST NOT read it * without sampling the sequence number in xtime_lock. @@ -1154,8 +1205,7 @@ void run_local_timers(void) void do_timer(unsigned long ticks) { jiffies_64 += ticks; - update_wall_time(); - calc_global_load(); + update_times(ticks); } #ifdef __ARCH_WANT_SYS_ALARM @@ -1356,17 +1406,37 @@ int do_sysinfo(struct sysinfo *info) { unsigned long mem_total, sav_total; unsigned int mem_unit, bitcount; - struct timespec tp; + unsigned long seq; memset(info, 0, sizeof(struct sysinfo)); - ktime_get_ts(&tp); - monotonic_to_bootbased(&tp); - info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); + do { + struct timespec tp; + seq = read_seqbegin(&xtime_lock); + + /* + * This is annoying. The below is the same thing + * posix_get_clock_monotonic() does, but it wants to + * take the lock which we want to cover the loads stuff + * too. + */ + + getnstimeofday(&tp); + tp.tv_sec += wall_to_monotonic.tv_sec; + tp.tv_nsec += wall_to_monotonic.tv_nsec; + monotonic_to_bootbased(&tp); + if (tp.tv_nsec - NSEC_PER_SEC >= 0) { + tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; + tp.tv_sec++; + } + info->uptime = tp.tv_sec + (tp.tv_nsec ? 
1 : 0); - get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); + info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); + info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); + info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); - info->procs = nr_threads; + info->procs = nr_threads; + } while (read_seqretry(&xtime_lock, seq)); si_meminfo(info); si_swapinfo(info); diff --git a/trunk/kernel/wait.c b/trunk/kernel/wait.c index ea7c3b4275cf..42a2dbc181c8 100644 --- a/trunk/kernel/wait.c +++ b/trunk/kernel/wait.c @@ -154,7 +154,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, if (!list_empty(&wait->task_list)) list_del_init(&wait->task_list); else if (waitqueue_active(q)) - __wake_up_locked_key(q, mode, key); + __wake_up_common(q, mode, 1, 0, key); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(abort_exclusive_wait); diff --git a/trunk/lib/cpumask.c b/trunk/lib/cpumask.c index eb23aaa0c7b8..1f71b97de0f9 100644 --- a/trunk/lib/cpumask.c +++ b/trunk/lib/cpumask.c @@ -119,12 +119,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) } EXPORT_SYMBOL(alloc_cpumask_var_node); -bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) -{ - return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); -} -EXPORT_SYMBOL(zalloc_cpumask_var_node); - /** * alloc_cpumask_var - allocate a struct cpumask * @mask: pointer to cpumask_var_t where the cpumask is returned @@ -141,12 +135,6 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) } EXPORT_SYMBOL(alloc_cpumask_var); -bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) -{ - return alloc_cpumask_var(mask, flags | __GFP_ZERO); -} -EXPORT_SYMBOL(zalloc_cpumask_var); - /** * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. * @mask: pointer to cpumask_var_t where the cpumask is returned diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 474c7e9dd51a..fe753ecf2aa5 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -149,6 +149,10 @@ static unsigned long __meminitdata dma_reserve; static int __meminitdata nr_nodemap_entries; static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; +#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE + static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; + static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; +#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ static unsigned long __initdata required_kernelcore; static unsigned long __initdata required_movablecore; static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; @@ -3098,6 +3102,64 @@ void __init sparse_memory_present_with_active_regions(int nid) early_node_map[i].end_pfn); } +/** + * push_node_boundaries - Push node boundaries to at least the requested boundary + * @nid: The nid of the node to push the boundary for + * @start_pfn: The start pfn of the node + * @end_pfn: The end pfn of the node + * + * In reserve-based hot-add, mem_map is allocated that is unused until hotadd + * time. Specifically, on x86_64, SRAT will report ranges that can potentially + * be hotplugged even though no physical memory exists. This function allows + * an arch to push out the node boundaries so mem_map is allocated that can + * be used later. 
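The avenrun[] arithmetic reinstated in kernel/timer.c above, and the scaling do_sysinfo() applies to it, are plain fixed-point operations: each LOAD_FREQ interval the active-task count is folded into an exponentially decaying average, and sysinfo widens the 11-bit fraction to the 16-bit SI_LOAD_SHIFT scale. The user-space sketch below reproduces both steps; FSHIFT, the EXP_* constants and SI_LOAD_SHIFT are quoted from memory of the era's headers and should be treated as assumptions rather than authoritative values.

	#include <stdio.h>

	#define FSHIFT         11                 /* bits of fractional precision */
	#define FIXED_1        (1 << FSHIFT)      /* 1.0 in fixed point */
	#define EXP_1          1884               /* 1/exp(5sec/1min) */
	#define EXP_5          2014               /* 1/exp(5sec/5min) */
	#define EXP_15         2037               /* 1/exp(5sec/15min) */
	#define SI_LOAD_SHIFT  16                 /* scale of sysinfo::loads */

	/* one CALC_LOAD step: exponentially decaying average of active tasks */
	static unsigned long calc_load_step(unsigned long load, unsigned long exp,
					    unsigned long active)
	{
		load *= exp;
		load += active * (FIXED_1 - exp);
		return load >> FSHIFT;
	}

	int main(void)
	{
		unsigned long avenrun0 = 0;
		unsigned long active = 2 * FIXED_1;  /* pretend 2 tasks stay runnable */
		int i;

		for (i = 0; i < 36; i++)             /* ~3 minutes of 5 s samples */
			avenrun0 = calc_load_step(avenrun0, EXP_1, active);

		printf("1-minute load: %lu.%02lu\n",
		       avenrun0 >> FSHIFT,
		       (avenrun0 & (FIXED_1 - 1)) * 100 / FIXED_1);

		/* the conversion used by do_sysinfo() above */
		printf("sysinfo loads[0]: %lu\n",
		       avenrun0 << (SI_LOAD_SHIFT - FSHIFT));
		return 0;
	}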
+ */ +#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE +void __init push_node_boundaries(unsigned int nid, + unsigned long start_pfn, unsigned long end_pfn) +{ + mminit_dprintk(MMINIT_TRACE, "zoneboundary", + "Entering push_node_boundaries(%u, %lu, %lu)\n", + nid, start_pfn, end_pfn); + + /* Initialise the boundary for this node if necessary */ + if (node_boundary_end_pfn[nid] == 0) + node_boundary_start_pfn[nid] = -1UL; + + /* Update the boundaries */ + if (node_boundary_start_pfn[nid] > start_pfn) + node_boundary_start_pfn[nid] = start_pfn; + if (node_boundary_end_pfn[nid] < end_pfn) + node_boundary_end_pfn[nid] = end_pfn; +} + +/* If necessary, push the node boundary out for reserve hotadd */ +static void __meminit account_node_boundary(unsigned int nid, + unsigned long *start_pfn, unsigned long *end_pfn) +{ + mminit_dprintk(MMINIT_TRACE, "zoneboundary", + "Entering account_node_boundary(%u, %lu, %lu)\n", + nid, *start_pfn, *end_pfn); + + /* Return if boundary information has not been provided */ + if (node_boundary_end_pfn[nid] == 0) + return; + + /* Check the boundaries and update if necessary */ + if (node_boundary_start_pfn[nid] < *start_pfn) + *start_pfn = node_boundary_start_pfn[nid]; + if (node_boundary_end_pfn[nid] > *end_pfn) + *end_pfn = node_boundary_end_pfn[nid]; +} +#else +void __init push_node_boundaries(unsigned int nid, + unsigned long start_pfn, unsigned long end_pfn) {} + +static void __meminit account_node_boundary(unsigned int nid, + unsigned long *start_pfn, unsigned long *end_pfn) {} +#endif + + /** * get_pfn_range_for_nid - Return the start and end page frames for a node * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. @@ -3123,6 +3185,9 @@ void __meminit get_pfn_range_for_nid(unsigned int nid, if (*start_pfn == -1UL) *start_pfn = 0; + + /* Push the node boundaries out if requested */ + account_node_boundary(nid, start_pfn, end_pfn); } /* @@ -3728,6 +3793,10 @@ void __init remove_all_active_ranges(void) { memset(early_node_map, 0, sizeof(early_node_map)); nr_nodemap_entries = 0; +#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE + memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); + memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); +#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ } /* Compare two active node_active_regions */ diff --git a/trunk/net/bluetooth/hci_sysfs.c b/trunk/net/bluetooth/hci_sysfs.c index 95f7a7a544b4..4cc3624bd22d 100644 --- a/trunk/net/bluetooth/hci_sysfs.c +++ b/trunk/net/bluetooth/hci_sysfs.c @@ -90,6 +90,9 @@ static void add_conn(struct work_struct *work) struct hci_conn *conn = container_of(work, struct hci_conn, work_add); struct hci_dev *hdev = conn->hdev; + /* ensure previous del is complete */ + flush_work(&conn->work_del); + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) { @@ -115,6 +118,9 @@ static void del_conn(struct work_struct *work) struct hci_conn *conn = container_of(work, struct hci_conn, work_del); struct hci_dev *hdev = conn->hdev; + /* ensure previous add is complete */ + flush_work(&conn->work_add); + if (!device_is_registered(&conn->dev)) return; diff --git a/trunk/net/sched/cls_api.c b/trunk/net/sched/cls_api.c index 09cdcdfe7e91..0759f32e9dca 100644 --- a/trunk/net/sched/cls_api.c +++ b/trunk/net/sched/cls_api.c @@ -135,7 +135,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) unsigned long cl; unsigned long fh; int err; - int tp_created = 0; if (net != &init_net) return -EINVAL; @@ -267,7 
+266,10 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) goto errout; } - tp_created = 1; + spin_lock_bh(root_lock); + tp->next = *back; + *back = tp; + spin_unlock_bh(root_lock); } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) goto errout; @@ -294,11 +296,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) switch (n->nlmsg_type) { case RTM_NEWTFILTER: err = -EEXIST; - if (n->nlmsg_flags & NLM_F_EXCL) { - if (tp_created) - tcf_destroy(tp); + if (n->nlmsg_flags & NLM_F_EXCL) goto errout; - } break; case RTM_DELTFILTER: err = tp->ops->delete(tp, fh); @@ -315,18 +314,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); - if (err == 0) { - if (tp_created) { - spin_lock_bh(root_lock); - tp->next = *back; - *back = tp; - spin_unlock_bh(root_lock); - } + if (err == 0) tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); - } else { - if (tp_created) - tcf_destroy(tp); - } errout: if (cl) diff --git a/trunk/net/sched/cls_cgroup.c b/trunk/net/sched/cls_cgroup.c index e5becb92b3e7..cc29b44b1500 100644 --- a/trunk/net/sched/cls_cgroup.c +++ b/trunk/net/sched/cls_cgroup.c @@ -167,9 +167,6 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base, struct tcf_exts e; int err; - if (!tca[TCA_OPTIONS]) - return -EINVAL; - if (head == NULL) { if (!handle) return -EINVAL; diff --git a/trunk/scripts/Makefile.lib b/trunk/scripts/Makefile.lib index 2b706617c89a..cba61ca403ca 100644 --- a/trunk/scripts/Makefile.lib +++ b/trunk/scripts/Makefile.lib @@ -188,34 +188,20 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@ # --------------------------------------------------------------------------- quiet_cmd_gzip = GZIP $@ -cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -f -9 > $@) || \ - (rm -f $@ ; false) +cmd_gzip = gzip -f -9 < $< > $@ # Bzip2 # --------------------------------------------------------------------------- -# Bzip2 and LZMA do not include size in file... so we have to fake that; -# append the size as a 32-bit littleendian number as gzip does. -size_append = echo -ne $(shell \ -dec_size=0; \ -for F in $1; do \ - fsize=$$(stat -c "%s" $$F); \ - dec_size=$$(expr $$dec_size + $$fsize); \ -done; \ -printf "%08x" $$dec_size | \ - sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g' \ -) - -quiet_cmd_bzip2 = BZIP2 $@ -cmd_bzip2 = (cat $(filter-out FORCE,$^) | \ - bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) +# Bzip2 does not include size in file... 
so we have to fake that +size_append=$(CONFIG_SHELL) $(srctree)/scripts/bin_size + +quiet_cmd_bzip2 = BZIP2 $@ +cmd_bzip2 = (bzip2 -9 < $< && $(size_append) $<) > $@ || (rm -f $@ ; false) # Lzma # --------------------------------------------------------------------------- quiet_cmd_lzma = LZMA $@ -cmd_lzma = (cat $(filter-out FORCE,$^) | \ - lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ - (rm -f $@ ; false) +cmd_lzma = (lzma -9 -c $< && $(size_append) $<) >$@ || (rm -f $@ ; false) diff --git a/trunk/scripts/bin_size b/trunk/scripts/bin_size new file mode 100644 index 000000000000..43e1b360cee6 --- /dev/null +++ b/trunk/scripts/bin_size @@ -0,0 +1,10 @@ +#!/bin/sh + +if [ $# = 0 ] ; then + echo Usage: $0 file +fi + +size_dec=`stat -c "%s" $1` +size_hex_echo_string=`printf "%08x" $size_dec | + sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g'` +/bin/echo -ne $size_hex_echo_string diff --git a/trunk/virt/kvm/kvm_main.c b/trunk/virt/kvm/kvm_main.c index 4d0dd390aa50..1ecbe2391c8b 100644 --- a/trunk/virt/kvm/kvm_main.c +++ b/trunk/virt/kvm/kvm_main.c @@ -2301,11 +2301,10 @@ int kvm_init(void *opaque, unsigned int vcpu_size, bad_pfn = page_to_pfn(bad_page); - if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { + if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; } - cpumask_clear(cpus_hardware_enabled); r = kvm_arch_hardware_setup(); if (r < 0)
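The size_append helper and the new scripts/bin_size file above append the payload size to bzip2 and lzma images as a 32-bit little-endian trailer, imitating the size field gzip already writes so the boot-time decompressor can learn the uncompressed size. The small C sketch below is offered only as an illustration of the byte order that stat/printf/sed pipeline produces; it is not part of the build machinery.

	#include <stdio.h>

	/* emit the 32-bit little-endian trailer; byte 0 is least significant */
	static void write_le32_size(FILE *out, unsigned long size)
	{
		int i;

		for (i = 0; i < 4; i++)
			fputc((int)((size >> (8 * i)) & 0xff), out);
	}

	int main(void)
	{
		write_le32_size(stdout, 0x00123456);  /* emits 56 34 12 00 */
		return 0;
	}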