diff --git a/[refs] b/[refs] index 39db08b4bc44..801296d462a7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: cd96891d48a945ca2011fbeceda73813d6286195 +refs/heads/master: ceb1cbac8eda66cf0f889def226b4e82f8ff857b diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 14bc7071f9df..f935a0cef404 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -1077,7 +1077,7 @@ F: drivers/media/video/s5p-fimc/ ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT M: Kyungmin Park M: Kamil Debski -M: Jeongtae Park +M: Jeongtae Park L: linux-arm-kernel@lists.infradead.org L: linux-media@vger.kernel.org S: Maintained @@ -1743,10 +1743,10 @@ F: include/linux/can/platform/ CAPABILITIES M: Serge Hallyn L: linux-security-module@vger.kernel.org -S: Supported +S: Supported F: include/linux/capability.h F: security/capability.c -F: security/commoncap.c +F: security/commoncap.c F: kernel/capability.c CELL BROADBAND ENGINE ARCHITECTURE @@ -2146,11 +2146,11 @@ S: Orphan F: drivers/net/wan/pc300* CYTTSP TOUCHSCREEN DRIVER -M: Javier Martinez Canillas -L: linux-input@vger.kernel.org -S: Maintained -F: drivers/input/touchscreen/cyttsp* -F: include/linux/input/cyttsp.h +M: Javier Martinez Canillas +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/input/touchscreen/cyttsp* +F: include/linux/input/cyttsp.h DAMA SLAVE for AX.25 M: Joerg Reuter @@ -2270,7 +2270,7 @@ F: include/linux/device-mapper.h F: include/linux/dm-*.h DIOLAN U2C-12 I2C DRIVER -M: Guenter Roeck +M: Guenter Roeck L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-diolan-u2c.c @@ -3145,7 +3145,7 @@ F: drivers/tty/hvc/ HARDWARE MONITORING M: Jean Delvare -M: Guenter Roeck +M: Guenter Roeck L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ @@ -4420,13 +4420,6 @@ S: Orphan F: drivers/video/matrox/matroxfb_* F: include/linux/matroxfb.h -MAX16065 HARDWARE MONITOR DRIVER -M: Guenter Roeck -L: lm-sensors@lm-sensors.org -S: Maintained -F: Documentation/hwmon/max16065 -F: drivers/hwmon/max16065.c - MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER M: "Hans J. 
Koch" L: lm-sensors@lm-sensors.org @@ -5165,7 +5158,7 @@ F: drivers/leds/leds-pca9532.c F: include/linux/leds-pca9532.h PCA9541 I2C BUS MASTER SELECTOR DRIVER -M: Guenter Roeck +M: Guenter Roeck L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/muxes/i2c-mux-pca9541.c @@ -5185,7 +5178,7 @@ S: Maintained F: drivers/firmware/pcdp.* PCI ERROR RECOVERY -M: Linas Vepstas +M: Linas Vepstas L: linux-pci@vger.kernel.org S: Supported F: Documentation/PCI/pci-error-recovery.txt @@ -5315,7 +5308,7 @@ F: drivers/video/fb-puv3.c F: drivers/rtc/rtc-puv3.c PMBUS HARDWARE MONITORING DRIVERS -M: Guenter Roeck +M: Guenter Roeck L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ W: http://www.roeck-us.net/linux/drivers/ diff --git a/trunk/arch/parisc/Makefile b/trunk/arch/parisc/Makefile index 5707f1a62341..dbc3850b1d0d 100644 --- a/trunk/arch/parisc/Makefile +++ b/trunk/arch/parisc/Makefile @@ -21,7 +21,6 @@ KBUILD_DEFCONFIG := default_defconfig NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 -LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) MACHINE := $(shell uname -m) ifeq ($(MACHINE),parisc*) @@ -80,7 +79,7 @@ kernel-y := mm/ kernel/ math-emu/ kernel-$(CONFIG_HPUX) += hpux/ core-y += $(addprefix arch/parisc/, $(kernel-y)) -libs-y += arch/parisc/lib/ $(LIBGCC) +libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ diff --git a/trunk/arch/parisc/include/asm/Kbuild b/trunk/arch/parisc/include/asm/Kbuild index 4383707d9801..19a434f55059 100644 --- a/trunk/arch/parisc/include/asm/Kbuild +++ b/trunk/arch/parisc/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm header-y += pdc.h -generic-y += word-at-a-time.h diff --git a/trunk/arch/parisc/include/asm/bug.h b/trunk/arch/parisc/include/asm/bug.h index 62a33338549c..72cfdb0cfdd1 100644 --- a/trunk/arch/parisc/include/asm/bug.h +++ b/trunk/arch/parisc/include/asm/bug.h @@ -1,8 +1,6 @@ #ifndef _PARISC_BUG_H #define _PARISC_BUG_H -#include /* for BUGFLAG_TAINT */ - /* * Tell the user there is some problem. * The offending file and line are encoded in the __bug_table section. diff --git a/trunk/arch/powerpc/kernel/module_32.c b/trunk/arch/powerpc/kernel/module_32.c index 2e3200ca485f..0b6d79617d7b 100644 --- a/trunk/arch/powerpc/kernel/module_32.c +++ b/trunk/arch/powerpc/kernel/module_32.c @@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) { - if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) - && entry->jump[1] == 0x398c0000 + (val & 0xffff)) + if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) + && entry->jump[1] == 0x396b0000 + (val & 0xffff)) return 1; return 0; } @@ -204,9 +204,10 @@ static uint32_t do_plt_call(void *location, entry++; } - entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ - entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ - entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ + /* Stolen from Paul Mackerras as well... 
*/ + entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ + entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ + entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ entry->jump[3] = 0x4e800420; /* bctr */ DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index be171ee73bf8..99a995c2a3f2 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -475,7 +475,6 @@ void timer_interrupt(struct pt_regs * regs) struct pt_regs *old_regs; u64 *next_tb = &__get_cpu_var(decrementers_next_tb); struct clock_event_device *evt = &__get_cpu_var(decrementers); - u64 now; /* Ensure a positive value is written to the decrementer, or else * some CPUs will continue to take decrementer exceptions. @@ -510,16 +509,9 @@ void timer_interrupt(struct pt_regs * regs) irq_work_run(); } - now = get_tb_or_rtc(); - if (now >= *next_tb) { - *next_tb = ~(u64)0; - if (evt->event_handler) - evt->event_handler(evt); - } else { - now = *next_tb - now; - if (now <= DECREMENTER_MAX) - set_dec((int)now); - } + *next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); #ifdef CONFIG_PPC64 /* collect purr register values often, for accurate calculations */ diff --git a/trunk/arch/tile/include/asm/thread_info.h b/trunk/arch/tile/include/asm/thread_info.h index e9c670d7a7fe..7e1fef36bde6 100644 --- a/trunk/arch/tile/include/asm/thread_info.h +++ b/trunk/arch/tile/include/asm/thread_info.h @@ -91,6 +91,11 @@ extern void smp_nap(void); /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ extern void _cpu_idle(void); +/* Switch boot idle thread to a freshly-allocated stack and free old stack. */ +extern void cpu_idle_on_new_stack(struct thread_info *old_ti, + unsigned long new_sp, + unsigned long new_ss10); + #else /* __ASSEMBLY__ */ /* diff --git a/trunk/arch/tile/kernel/entry.S b/trunk/arch/tile/kernel/entry.S index c31637baff28..133c4b56a99e 100644 --- a/trunk/arch/tile/kernel/entry.S +++ b/trunk/arch/tile/kernel/entry.S @@ -68,6 +68,20 @@ STD_ENTRY(KBacktraceIterator_init_current) jrp lr /* keep backtracer happy */ STD_ENDPROC(KBacktraceIterator_init_current) +/* + * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then + * free the old stack (passed in r0) and re-invoke cpu_idle(). + * We update sp and ksp0 simultaneously to avoid backtracer warnings. + */ +STD_ENTRY(cpu_idle_on_new_stack) + { + move sp, r1 + mtspr SPR_SYSTEM_SAVE_K_0, r2 + } + jal free_thread_info + j cpu_idle + STD_ENDPROC(cpu_idle_on_new_stack) + /* Loop forever on a nap during SMP boot. */ STD_ENTRY(smp_nap) nap diff --git a/trunk/arch/tile/kernel/setup.c b/trunk/arch/tile/kernel/setup.c index dd87f3420390..6098ccc59be2 100644 --- a/trunk/arch/tile/kernel/setup.c +++ b/trunk/arch/tile/kernel/setup.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S index efe5acfc79c3..8bbea6aa40d9 100644 --- a/trunk/arch/x86/boot/header.S +++ b/trunk/arch/x86/boot/header.S @@ -94,10 +94,10 @@ bs_die: .section ".bsdata", "a" bugger_off_msg: - .ascii "Direct floppy boot is not supported. " - .ascii "Use a boot loader program instead.\r\n" + .ascii "Direct booting from floppy is no longer supported.\r\n" + .ascii "Please use a boot loader program instead.\r\n" .ascii "\n" - .ascii "Remove disk and press any key to reboot ...\r\n" + .ascii "Remove disk and press any key to reboot . . 
.\r\n" .byte 0 #ifdef CONFIG_EFI_STUB @@ -111,7 +111,7 @@ coff_header: #else .word 0x8664 # x86-64 #endif - .word 3 # nr_sections + .word 2 # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols @@ -158,8 +158,8 @@ extra_header_fields: #else .quad 0 # ImageBase #endif - .long 0x20 # SectionAlignment - .long 0x20 # FileAlignment + .long 0x1000 # SectionAlignment + .long 0x200 # FileAlignment .word 0 # MajorOperatingSystemVersion .word 0 # MinorOperatingSystemVersion .word 0 # MajorImageVersion @@ -200,10 +200,8 @@ extra_header_fields: # Section table section_table: - # - # The offset & size fields are filled in by build.c. - # - .ascii ".setup" + .ascii ".text" + .byte 0 .byte 0 .byte 0 .long 0 @@ -219,8 +217,9 @@ section_table: # # The EFI application loader requires a relocation section - # because EFI applications must be relocatable. The .reloc - # offset & size fields are filled in by build.c. + # because EFI applications must be relocatable. But since + # we don't need the loader to fixup any relocs for us, we + # just create an empty (zero-length) .reloc section header. # .ascii ".reloc" .byte 0 @@ -234,25 +233,6 @@ section_table: .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0x42100040 # Characteristics (section flags) - - # - # The offset & size fields are filled in by build.c. - # - .ascii ".text" - .byte 0 - .byte 0 - .byte 0 - .long 0 - .long 0x0 # startup_{32,64} - .long 0 # Size of initialized data - # on disk - .long 0x0 # startup_{32,64} - .long 0 # PointerToRelocations - .long 0 # PointerToLineNumbers - .word 0 # NumberOfRelocations - .word 0 # NumberOfLineNumbers - .long 0x60500020 # Characteristics (section flags) - #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup. 
This is part 1 of the diff --git a/trunk/arch/x86/boot/tools/build.c b/trunk/arch/x86/boot/tools/build.c index 4b8e165ee572..3f61f6e2b46f 100644 --- a/trunk/arch/x86/boot/tools/build.c +++ b/trunk/arch/x86/boot/tools/build.c @@ -50,8 +50,6 @@ typedef unsigned int u32; u8 buf[SETUP_SECT_MAX*512]; int is_big_kernel; -#define PECOFF_RELOC_RESERVE 0x20 - /*----------------------------------------------------------------------*/ static const u32 crctab32[] = { @@ -135,103 +133,11 @@ static void usage(void) die("Usage: build setup system [> image]"); } -#ifdef CONFIG_EFI_STUB - -static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) -{ - unsigned int pe_header; - unsigned short num_sections; - u8 *section; - - pe_header = get_unaligned_le32(&buf[0x3c]); - num_sections = get_unaligned_le16(&buf[pe_header + 6]); - -#ifdef CONFIG_X86_32 - section = &buf[pe_header + 0xa8]; -#else - section = &buf[pe_header + 0xb8]; -#endif - - while (num_sections > 0) { - if (strncmp((char*)section, section_name, 8) == 0) { - /* section header size field */ - put_unaligned_le32(size, section + 0x8); - - /* section header vma field */ - put_unaligned_le32(offset, section + 0xc); - - /* section header 'size of initialised data' field */ - put_unaligned_le32(size, section + 0x10); - - /* section header 'file offset' field */ - put_unaligned_le32(offset, section + 0x14); - - break; - } - section += 0x28; - num_sections--; - } -} - -static void update_pecoff_setup_and_reloc(unsigned int size) -{ - u32 setup_offset = 0x200; - u32 reloc_offset = size - PECOFF_RELOC_RESERVE; - u32 setup_size = reloc_offset - setup_offset; - - update_pecoff_section_header(".setup", setup_offset, setup_size); - update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE); - - /* - * Modify .reloc section contents with a single entry. The - * relocation is applied to offset 10 of the relocation section. - */ - put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]); - put_unaligned_le32(10, &buf[reloc_offset + 4]); -} - -static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) -{ - unsigned int pe_header; - unsigned int text_sz = file_sz - text_start; - - pe_header = get_unaligned_le32(&buf[0x3c]); - - /* Size of image */ - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); - - /* - * Size of code: Subtract the size of the first sector (512 bytes) - * which includes the header. - */ - put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); - -#ifdef CONFIG_X86_32 - /* - * Address of entry point. - * - * The EFI stub entry point is +16 bytes from the start of - * the .text section. - */ - put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); -#else - /* - * Address of entry point. startup_32 is at the beginning and - * the 64-bit entry point (startup_64) is always 512 bytes - * after. 
The EFI stub entry point is 16 bytes after that, as - * the first instruction allows legacy loaders to jump over - * the EFI stub initialisation - */ - put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); -#endif /* CONFIG_X86_32 */ - - update_pecoff_section_header(".text", text_start, text_sz); -} - -#endif /* CONFIG_EFI_STUB */ - int main(int argc, char ** argv) { +#ifdef CONFIG_EFI_STUB + unsigned int file_sz, pe_header; +#endif unsigned int i, sz, setup_sectors; int c; u32 sys_size; @@ -257,12 +163,6 @@ int main(int argc, char ** argv) die("Boot block hasn't got boot flag (0xAA55)"); fclose(file); -#ifdef CONFIG_EFI_STUB - /* Reserve 0x20 bytes for .reloc section */ - memset(buf+c, 0, PECOFF_RELOC_RESERVE); - c += PECOFF_RELOC_RESERVE; -#endif - /* Pad unused space with zeros */ setup_sectors = (c + 511) / 512; if (setup_sectors < SETUP_SECT_MIN) @@ -270,10 +170,6 @@ int main(int argc, char ** argv) i = setup_sectors*512; memset(buf+c, 0, i-c); -#ifdef CONFIG_EFI_STUB - update_pecoff_setup_and_reloc(i); -#endif - /* Set the default root device */ put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); @@ -298,8 +194,66 @@ int main(int argc, char ** argv) put_unaligned_le32(sys_size, &buf[0x1f4]); #ifdef CONFIG_EFI_STUB - update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); -#endif + file_sz = sz + i + ((sys_size * 16) - sz); + + pe_header = get_unaligned_le32(&buf[0x3c]); + + /* Size of image */ + put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); + + /* + * Subtract the size of the first section (512 bytes) which + * includes the header and .reloc section. The remaining size + * is that of the .text section. + */ + file_sz -= 512; + + /* Size of code */ + put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]); + +#ifdef CONFIG_X86_32 + /* + * Address of entry point. + * + * The EFI stub entry point is +16 bytes from the start of + * the .text section. + */ + put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); + + /* .text size */ + put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); + + /* .text vma */ + put_unaligned_le32(0x200, &buf[pe_header + 0xb4]); + + /* .text size of initialised data */ + put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]); + + /* .text file offset */ + put_unaligned_le32(0x200, &buf[pe_header + 0xbc]); +#else + /* + * Address of entry point. startup_32 is at the beginning and + * the 64-bit entry point (startup_64) is always 512 bytes + * after. The EFI stub entry point is 16 bytes after that, as + * the first instruction allows legacy loaders to jump over + * the EFI stub initialisation + */ + put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); + + /* .text size */ + put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); + + /* .text vma */ + put_unaligned_le32(0x200, &buf[pe_header + 0xc4]); + + /* .text size of initialised data */ + put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]); + + /* .text file offset */ + put_unaligned_le32(0x200, &buf[pe_header + 0xcc]); +#endif /* CONFIG_X86_32 */ +#endif /* CONFIG_EFI_STUB */ crc = partial_crc32(buf, i, crc); if (fwrite(buf, 1, i, stdout) != i) diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h index dc580c42851c..0e3793b821ef 100644 --- a/trunk/arch/x86/include/asm/nmi.h +++ b/trunk/arch/x86/include/asm/nmi.h @@ -54,20 +54,6 @@ struct nmiaction { __register_nmi_handler((t), &fn##_na); \ }) -/* - * For special handlers that register/unregister in the - * init section only. This should be considered rare. 
- */ -#define register_nmi_handler_initonly(t, fn, fg, n) \ -({ \ - static struct nmiaction fn##_na __initdata = { \ - .handler = (fn), \ - .name = (n), \ - .flags = (fg), \ - }; \ - __register_nmi_handler((t), &fn##_na); \ -}) - int __register_nmi_handler(unsigned int, struct nmiaction *); void unregister_nmi_handler(unsigned int, const char *); diff --git a/trunk/arch/x86/include/asm/uaccess.h b/trunk/arch/x86/include/asm/uaccess.h index e1f3a17034fc..04cd6882308e 100644 --- a/trunk/arch/x86/include/asm/uaccess.h +++ b/trunk/arch/x86/include/asm/uaccess.h @@ -33,8 +33,9 @@ #define segment_eq(a, b) ((a).seg == (b).seg) #define user_addr_max() (current_thread_info()->addr_limit.seg) -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < user_addr_max()) +#define __addr_ok(addr) \ + ((unsigned long __force)(addr) < \ + (current_thread_info()->addr_limit.seg)) /* * Test whether a block of memory is a valid user space address. @@ -46,14 +47,14 @@ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ -#define __range_not_ok(addr, size, limit) \ +#define __range_not_ok(addr, size) \ ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ : "=&r" (flag), "=r" (roksum) \ : "1" (addr), "g" ((long)(size)), \ - "rm" (limit)); \ + "rm" (current_thread_info()->addr_limit.seg)); \ flag; \ }) @@ -76,8 +77,7 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. */ -#define access_ok(type, addr, size) \ - (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) +#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) /* * The exception table consists of pairs of addresses relative to the diff --git a/trunk/arch/x86/include/asm/uv/uv_bau.h b/trunk/arch/x86/include/asm/uv/uv_bau.h index 6149b476d9df..becf47b81735 100644 --- a/trunk/arch/x86/include/asm/uv/uv_bau.h +++ b/trunk/arch/x86/include/asm/uv/uv_bau.h @@ -149,6 +149,7 @@ /* 4 bits of software ack period */ #define UV2_ACK_MASK 0x7UL #define UV2_ACK_UNITS_SHFT 3 +#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT /* diff --git a/trunk/arch/x86/kernel/aperture_64.c b/trunk/arch/x86/kernel/aperture_64.c index d5fd66f0d4cd..6e76c191a835 100644 --- a/trunk/arch/x86/kernel/aperture_64.c +++ b/trunk/arch/x86/kernel/aperture_64.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void) return 0; } memblock_reserve(addr, aper_size); + /* + * Kmemleak should not scan this block as it may not be mapped via the + * kernel direct mapping. 
+ */ + kmemleak_ignore(phys_to_virt(addr)); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", aper_size >> 10, addr); insert_aperture_resource((u32)addr, aper_size); diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 5f0ff597437c..ac96561d1a99 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) BUG_ON(!cfg->vector); vector = cfg->vector; - for_each_cpu(cpu, cfg->domain) + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; @@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; - for_each_cpu(cpu, cfg->old_domain) { + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index da27c5d2168a..a97f3c4a3946 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) static void __mcheck_cpu_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); - unsigned long iv = check_interval * HZ; + unsigned long iv = __this_cpu_read(mce_next_interval); setup_timer(t, mce_timer_fn, smp_processor_id()); diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index c4706cf9c011..e049d6da0183 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -1496,7 +1496,6 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) if (!cpuc->shared_regs) goto error; } - cpuc->is_fake = 1; return cpuc; error: free_fake_cpuc(cpuc); @@ -1757,12 +1756,6 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); } -static inline int -valid_user_frame(const void __user *fp, unsigned long size) -{ - return (__range_not_ok(fp, size, TASK_SIZE) == 0); -} - #ifdef CONFIG_COMPAT #include @@ -1787,7 +1780,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (bytes != sizeof(frame)) break; - if (!valid_user_frame(fp, sizeof(frame))) + if (fp < compat_ptr(regs->sp)) break; perf_callchain_store(entry, frame.return_address); @@ -1833,7 +1826,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) if (bytes != sizeof(frame)) break; - if (!valid_user_frame(fp, sizeof(frame))) + if ((unsigned long)fp < regs->sp) break; perf_callchain_store(entry, frame.return_address); diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index 7241e2fc3c17..6638aaf54493 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -117,7 +117,6 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ unsigned int group_flag; - int is_fake; /* * Intel DebugStore bits @@ -365,7 +364,6 @@ struct x86_pmu { int pebs_record_size; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; - void (*pebs_aliases)(struct perf_event *event); /* * Intel LBR diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 
187c294bc658..166546ec6aef 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -1119,33 +1119,27 @@ intel_bts_constraints(struct perf_event *event) return NULL; } -static int intel_alt_er(int idx) +static bool intel_try_alt_er(struct perf_event *event, int orig_idx) { if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) - return idx; + return false; - if (idx == EXTRA_REG_RSP_0) - return EXTRA_REG_RSP_1; - - if (idx == EXTRA_REG_RSP_1) - return EXTRA_REG_RSP_0; - - return idx; -} - -static void intel_fixup_er(struct perf_event *event, int idx) -{ - event->hw.extra_reg.idx = idx; - - if (idx == EXTRA_REG_RSP_0) { - event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= 0x01b7; - event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; - } else if (idx == EXTRA_REG_RSP_1) { + if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; event->hw.config |= 0x01bb; + event->hw.extra_reg.idx = EXTRA_REG_RSP_1; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; + } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01b7; + event->hw.extra_reg.idx = EXTRA_REG_RSP_0; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; } + + if (event->hw.extra_reg.idx == orig_idx) + return false; + + return true; } /* @@ -1163,18 +1157,14 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, struct event_constraint *c = &emptyconstraint; struct er_account *era; unsigned long flags; - int idx = reg->idx; + int orig_idx = reg->idx; - /* - * reg->alloc can be set due to existing state, so for fake cpuc we - * need to ignore this, otherwise we might fail to allocate proper fake - * state for this extra reg constraint. Also see the comment below. - */ - if (reg->alloc && !cpuc->is_fake) + /* already allocated shared msr */ + if (reg->alloc) return NULL; /* call x86_get_event_constraint() */ again: - era = &cpuc->shared_regs->regs[idx]; + era = &cpuc->shared_regs->regs[reg->idx]; /* * we use spin_lock_irqsave() to avoid lockdep issues when * passing a fake cpuc @@ -1183,29 +1173,6 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, if (!atomic_read(&era->ref) || era->config == reg->config) { - /* - * If its a fake cpuc -- as per validate_{group,event}() we - * shouldn't touch event state and we can avoid doing so - * since both will only call get_event_constraints() once - * on each event, this avoids the need for reg->alloc. - * - * Not doing the ER fixup will only result in era->reg being - * wrong, but since we won't actually try and program hardware - * this isn't a problem either. - */ - if (!cpuc->is_fake) { - if (idx != reg->idx) - intel_fixup_er(event, idx); - - /* - * x86_schedule_events() can call get_event_constraints() - * multiple times on events in the case of incremental - * scheduling(). reg->alloc ensures we only do the ER - * allocation once. 
- */ - reg->alloc = 1; - } - /* lock in msr value */ era->config = reg->config; era->reg = reg->reg; @@ -1213,17 +1180,17 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, /* one more user */ atomic_inc(&era->ref); + /* no need to reallocate during incremental event scheduling */ + reg->alloc = 1; + /* * need to call x86_get_event_constraint() * to check if associated event has constraints */ c = NULL; - } else { - idx = intel_alt_er(idx); - if (idx != reg->idx) { - raw_spin_unlock_irqrestore(&era->lock, flags); - goto again; - } + } else if (intel_try_alt_er(event, orig_idx)) { + raw_spin_unlock_irqrestore(&era->lock, flags); + goto again; } raw_spin_unlock_irqrestore(&era->lock, flags); @@ -1237,14 +1204,11 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, struct er_account *era; /* - * Only put constraint if extra reg was actually allocated. Also takes - * care of event which do not use an extra shared reg. - * - * Also, if this is a fake cpuc we shouldn't touch any event state - * (reg->alloc) and we don't care about leaving inconsistent cpuc state - * either since it'll be thrown out. + * only put constraint if extra reg was actually + * allocated. Also takes care of event which do + * not use an extra shared reg */ - if (!reg->alloc || cpuc->is_fake) + if (!reg->alloc) return; era = &cpuc->shared_regs->regs[reg->idx]; @@ -1336,9 +1300,15 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, intel_put_shared_regs_event_constraints(cpuc, event); } -static void intel_pebs_aliases_core2(struct perf_event *event) +static int intel_pmu_hw_config(struct perf_event *event) { - if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { + int ret = x86_pmu_hw_config(event); + + if (ret) + return ret; + + if (event->attr.precise_ip && + (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { /* * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P * (0x003c) so that we can use it with PEBS. @@ -1359,48 +1329,10 @@ static void intel_pebs_aliases_core2(struct perf_event *event) */ u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); - alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); - event->hw.config = alt_config; - } -} - -static void intel_pebs_aliases_snb(struct perf_event *event) -{ - if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { - /* - * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P - * (0x003c) so that we can use it with PEBS. - * - * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't - * PEBS capable. However we can use UOPS_RETIRED.ALL - * (0x01c2), which is a PEBS capable event, to get the same - * count. - * - * UOPS_RETIRED.ALL counts the number of cycles that retires - * CNTMASK micro-ops. By setting CNTMASK to a value (16) - * larger than the maximum number of micro-ops that can be - * retired per cycle (4) and then inverting the condition, we - * count all cycles that retire 16 or less micro-ops, which - * is every cycle. - * - * Thereby we gain a PEBS capable cycle counter. 
- */ - u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event->hw.config = alt_config; } -} - -static int intel_pmu_hw_config(struct perf_event *event) -{ - int ret = x86_pmu_hw_config(event); - - if (ret) - return ret; - - if (event->attr.precise_ip && x86_pmu.pebs_aliases) - x86_pmu.pebs_aliases(event); if (intel_pmu_needs_lbr_smpl(event)) { ret = intel_pmu_setup_lbr_filter(event); @@ -1675,7 +1607,6 @@ static __initconst const struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .get_event_constraints = intel_get_event_constraints, .put_event_constraints = intel_put_event_constraints, - .pebs_aliases = intel_pebs_aliases_core2, .format_attrs = intel_arch3_formats_attr, @@ -1909,9 +1840,8 @@ __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ - case 45: /* SandyBridge, "Romely-EP" */ x86_add_quirk(intel_sandybridge_quirk); - case 58: /* IvyBridge */ + case 45: /* SandyBridge, "Romely-EP" */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -1919,7 +1849,6 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; - x86_pmu.pebs_aliases = intel_pebs_aliases_snb; x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c index 35e2192df9f4..5a3edc27f6e5 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -400,7 +400,14 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ - INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ + INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ + INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ + INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ + INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ + INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ + INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ + INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ + INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ diff --git a/trunk/arch/x86/kernel/nmi_selftest.c b/trunk/arch/x86/kernel/nmi_selftest.c index 149b8d9c6ad4..e31bf8d5c4d2 100644 --- a/trunk/arch/x86/kernel/nmi_selftest.c +++ b/trunk/arch/x86/kernel/nmi_selftest.c @@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) static void __init init_nmi_testsuite(void) { /* trap all the unknown NMIs we may generate */ - register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); + register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); } static void __init cleanup_nmi_testsuite(void) @@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) { unsigned long timeout; - if 
(register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, + if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, NMI_FLAG_FIRST, "nmi_selftest")) { nmi_fail = FAILURE; return; diff --git a/trunk/arch/x86/kernel/reboot.c b/trunk/arch/x86/kernel/reboot.c index 25b48edb847c..79c45af81604 100644 --- a/trunk/arch/x86/kernel/reboot.c +++ b/trunk/arch/x86/kernel/reboot.c @@ -639,11 +639,9 @@ void native_machine_shutdown(void) set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); /* - * O.K Now that I'm on the appropriate processor, stop all of the - * others. Also disable the local irq to not receive the per-cpu - * timer interrupt which may trigger scheduler's load balance. + * O.K Now that I'm on the appropriate processor, + * stop all of the others. */ - local_irq_disable(); stop_other_cpus(); #endif diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index fd019d78b1f4..3fab55bea29b 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -382,6 +382,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) if ((i == cpu) || (has_mc && match_llc(c, o))) link_mask(llc_shared, cpu, i); + } + + /* + * This needs a separate iteration over the cpus because we rely on all + * cpu_sibling_mask links to be set-up. + */ + for_each_cpu(i, cpu_sibling_setup_mask) { + o = &cpu_data(i); + if ((i == cpu) || (has_mc && match_mc(c, o))) { link_mask(core, cpu, i); diff --git a/trunk/arch/x86/lib/usercopy.c b/trunk/arch/x86/lib/usercopy.c index 677b1ed184c9..f61ee67ec00f 100644 --- a/trunk/arch/x86/lib/usercopy.c +++ b/trunk/arch/x86/lib/usercopy.c @@ -8,7 +8,6 @@ #include #include -#include /* * best effort, GUP based copy_from_user() that is NMI-safe @@ -22,9 +21,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) void *map; int ret; - if (__range_not_ok(from, n, TASK_SIZE) == 0) - return len; - do { ret = __get_user_pages_fast(addr, 1, 0, &page); if (!ret) diff --git a/trunk/arch/x86/lib/x86-opcode-map.txt b/trunk/arch/x86/lib/x86-opcode-map.txt index 5d7e51f3fd28..819137904428 100644 --- a/trunk/arch/x86/lib/x86-opcode-map.txt +++ b/trunk/arch/x86/lib/x86-opcode-map.txt @@ -28,7 +28,7 @@ # - (66): the last prefix is 0x66 # - (F3): the last prefix is 0xF3 # - (F2): the last prefix is 0xF2 -# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) +# Table: one byte opcode Referrer: @@ -515,12 +515,12 @@ b4: LFS Gv,Mp b5: LGS Gv,Mp b6: MOVZX Gv,Eb b7: MOVZX Gv,Ew -b8: JMPE (!F3) | POPCNT Gv,Ev (F3) +b8: JMPE | POPCNT Gv,Ev (F3) b9: Grp10 (1A) ba: Grp8 Ev,Ib (1A) bb: BTC Ev,Gv -bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) -bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) +bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) +bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) be: MOVSX Gv,Eb bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index bc4e9d84157f..97141c26a13a 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -62,8 +62,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en extra += PMD_SIZE; #endif /* The first 2/4M doesn't use large pages. 
*/ - if (mr->start < PMD_SIZE) - extra += mr->end - mr->start; + extra += mr->end - mr->start; ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; } else diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c index 4599c3e8bcb6..732af3a96183 100644 --- a/trunk/arch/x86/mm/srat.c +++ b/trunk/arch/x86/mm/srat.c @@ -176,8 +176,6 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) return; } - node_set(node, numa_nodes_parsed); - printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1); diff --git a/trunk/arch/x86/platform/mrst/mrst.c b/trunk/arch/x86/platform/mrst/mrst.c index fd41a9262d65..e31bcd8f2eee 100644 --- a/trunk/arch/x86/platform/mrst/mrst.c +++ b/trunk/arch/x86/platform/mrst/mrst.c @@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier); EXPORT_SYMBOL_GPL(intel_scu_notifier); /* Called by IPC driver */ -void __devinit intel_scu_devices_create(void) +void intel_scu_devices_create(void) { int i; diff --git a/trunk/arch/x86/platform/uv/tlb_uv.c b/trunk/arch/x86/platform/uv/tlb_uv.c index 59880afa851f..3ae0e61abd23 100644 --- a/trunk/arch/x86/platform/uv/tlb_uv.c +++ b/trunk/arch/x86/platform/uv/tlb_uv.c @@ -1295,6 +1295,7 @@ static void __init enable_timeouts(void) */ mmr_image |= (1L << SOFTACK_MSHIFT); if (is_uv2_hub()) { + mmr_image &= ~(1L << UV2_LEG_SHFT); mmr_image |= (1L << UV2_EXT_SHFT); } write_mmr_misc_control(pnode, mmr_image); diff --git a/trunk/arch/x86/tools/gen-insn-attr-x86.awk b/trunk/arch/x86/tools/gen-insn-attr-x86.awk index ddcf39b1a18d..5f6a5b6c3a15 100644 --- a/trunk/arch/x86/tools/gen-insn-attr-x86.awk +++ b/trunk/arch/x86/tools/gen-insn-attr-x86.awk @@ -66,10 +66,9 @@ BEGIN { rex_expr = "^REX(\\.[XRWB]+)*" fpu_expr = "^ESC" # TODO - lprefix1_expr = "\\((66|!F3)\\)" + lprefix1_expr = "\\(66\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\((F2|!F3)\\)" - lprefix_expr = "\\((66|F2|F3)\\)" + lprefix3_expr = "\\(F2\\)" max_lprefix = 4 # All opcodes starting with lower-case 'v' or with (v1) superscript @@ -334,16 +333,13 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(ext, lprefix1_expr)) { lptable1[idx] = add_flags(lptable1[idx],flags) variant = "INAT_VARIANT" - } - if (match(ext, lprefix2_expr)) { + } else if (match(ext, lprefix2_expr)) { lptable2[idx] = add_flags(lptable2[idx],flags) variant = "INAT_VARIANT" - } - if (match(ext, lprefix3_expr)) { + } else if (match(ext, lprefix3_expr)) { lptable3[idx] = add_flags(lptable3[idx],flags) variant = "INAT_VARIANT" - } - if (!match(ext, lprefix_expr)){ + } else { table[idx] = add_flags(table[idx],flags) } } diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig index 80998958cf45..47768ff87343 100644 --- a/trunk/drivers/acpi/Kconfig +++ b/trunk/drivers/acpi/Kconfig @@ -208,7 +208,7 @@ config ACPI_IPMI config ACPI_HOTPLUG_CPU bool - depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU + depends on ACPI_PROCESSOR && HOTPLUG_CPU select ACPI_CONTAINER default y diff --git a/trunk/drivers/acpi/battery.c b/trunk/drivers/acpi/battery.c index 7dd3f9fb9f3f..86933ca8b472 100644 --- a/trunk/drivers/acpi/battery.c +++ b/trunk/drivers/acpi/battery.c @@ -643,19 +643,11 @@ static int acpi_battery_update(struct acpi_battery *battery) static void acpi_battery_refresh(struct acpi_battery *battery) { - int power_unit; - if (!battery->bat.dev) return; - power_unit = battery->power_unit; - acpi_battery_get_info(battery); - - if (power_unit == battery->power_unit) - return; - - /* The battery has 
changed its reporting units. */ + /* The battery may have changed its reporting units. */ sysfs_remove_battery(battery); sysfs_add_battery(battery); } diff --git a/trunk/drivers/acpi/processor_perflib.c b/trunk/drivers/acpi/processor_perflib.c index a093dc163a42..0af48a8554cd 100644 --- a/trunk/drivers/acpi/processor_perflib.c +++ b/trunk/drivers/acpi/processor_perflib.c @@ -333,7 +333,6 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) struct acpi_buffer state = { 0, NULL }; union acpi_object *pss = NULL; int i; - int last_invalid = -1; status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); @@ -395,33 +394,14 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) ((u32)(px->core_frequency * 1000) != (px->core_frequency * 1000))) { printk(KERN_ERR FW_BUG PREFIX - "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", - pr->id, px->core_frequency); - if (last_invalid == -1) - last_invalid = i; - } else { - if (last_invalid != -1) { - /* - * Copy this valid entry over last_invalid entry - */ - memcpy(&(pr->performance->states[last_invalid]), - px, sizeof(struct acpi_processor_px)); - ++last_invalid; - } + "Invalid BIOS _PSS frequency: 0x%llx MHz\n", + px->core_frequency); + result = -EFAULT; + kfree(pr->performance->states); + goto end; } } - if (last_invalid == 0) { - printk(KERN_ERR FW_BUG PREFIX - "No valid BIOS _PSS frequency found for processor %d\n", pr->id); - result = -EFAULT; - kfree(pr->performance->states); - pr->performance->states = NULL; - } - - if (last_invalid > 0) - pr->performance->state_count = last_invalid; - end: kfree(buffer.pointer); diff --git a/trunk/drivers/acpi/video.c b/trunk/drivers/acpi/video.c index a576575617d7..9577b6fa2650 100644 --- a/trunk/drivers/acpi/video.c +++ b/trunk/drivers/acpi/video.c @@ -1687,6 +1687,10 @@ static int acpi_video_bus_add(struct acpi_device *device) set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); set_bit(KEY_DISPLAY_OFF, input->keybit); + error = input_register_device(input); + if (error) + goto err_stop_video; + printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), video->flags.multihead ? 
"yes" : "no", @@ -1697,16 +1701,12 @@ static int acpi_video_bus_add(struct acpi_device *device) video->pm_nb.priority = 0; error = register_pm_notifier(&video->pm_nb); if (error) - goto err_stop_video; - - error = input_register_device(input); - if (error) - goto err_unregister_pm_notifier; + goto err_unregister_input_dev; return 0; - err_unregister_pm_notifier: - unregister_pm_notifier(&video->pm_nb); + err_unregister_input_dev: + input_unregister_device(input); err_stop_video: acpi_video_bus_stop_devices(video); err_free_input_dev: @@ -1743,18 +1743,9 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) return 0; } -static int __init is_i740(struct pci_dev *dev) -{ - if (dev->device == 0x00D1) - return 1; - if (dev->device == 0x7000) - return 1; - return 0; -} - static int __init intel_opregion_present(void) { - int opregion = 0; +#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) struct pci_dev *dev = NULL; u32 address; @@ -1763,15 +1754,13 @@ static int __init intel_opregion_present(void) continue; if (dev->vendor != PCI_VENDOR_ID_INTEL) continue; - /* We don't want to poke around undefined i740 registers */ - if (is_i740(dev)) - continue; pci_read_config_dword(dev, 0xfc, &address); if (!address) continue; - opregion = 1; + return 1; } - return opregion; +#endif + return 0; } int acpi_video_register(void) diff --git a/trunk/drivers/char/agp/intel-agp.c b/trunk/drivers/char/agp/intel-agp.c index 0a4185279417..764f70c5e690 100644 --- a/trunk/drivers/char/agp/intel-agp.c +++ b/trunk/drivers/char/agp/intel-agp.c @@ -898,7 +898,6 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), - ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h index 8e2d9140f300..c0091753a0d1 100644 --- a/trunk/drivers/char/agp/intel-agp.h +++ b/trunk/drivers/char/agp/intel-agp.h @@ -212,7 +212,6 @@ #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 -#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c index d6de2e07fa03..420953197d0a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = { }; static struct drm_driver exynos_drm_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | - DRIVER_GEM | DRIVER_PRIME, + .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | + DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = exynos_drm_load, .unload = exynos_drm_unload, .open = exynos_drm_open, diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 23d5ad379f86..6e9ac7bd1dcf 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -172,12 +172,19 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) manager_ops->commit(manager->dev); } +static struct drm_crtc * 
+exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) +{ + return encoder->crtc; +} + static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { .dpms = exynos_drm_encoder_dpms, .mode_fixup = exynos_drm_encoder_mode_fixup, .mode_set = exynos_drm_encoder_mode_set, .prepare = exynos_drm_encoder_prepare, .commit = exynos_drm_encoder_commit, + .get_crtc = exynos_drm_encoder_get_crtc, }; static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c index 4ccfe4328fab..f82a299553fb 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -51,22 +51,11 @@ struct exynos_drm_fb { static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - unsigned int i; DRM_DEBUG_KMS("%s\n", __FILE__); drm_framebuffer_cleanup(fb); - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { - struct drm_gem_object *obj; - - if (exynos_fb->exynos_gem_obj[i] == NULL) - continue; - - obj = &exynos_fb->exynos_gem_obj[i]->base; - drm_gem_object_unreference_unlocked(obj); - } - kfree(exynos_fb); exynos_fb = NULL; } @@ -145,11 +134,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } + drm_gem_object_unreference_unlocked(obj); + fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); - if (IS_ERR(fb)) { - drm_gem_object_unreference_unlocked(obj); + if (IS_ERR(fb)) return fb; - } exynos_fb = to_exynos_fb(fb); nr = exynos_drm_format_num_buffers(fb->pixel_format); @@ -163,6 +152,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } + drm_gem_object_unreference_unlocked(obj); + exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); } diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.h b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.h index 50823756cdea..3ecb30d93552 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -31,10 +31,10 @@ static inline int exynos_drm_format_num_buffers(uint32_t format) { switch (format) { - case DRM_FORMAT_NV12: + case DRM_FORMAT_NV12M: case DRM_FORMAT_NV12MT: return 2; - case DRM_FORMAT_YUV420: + case DRM_FORMAT_YUV420M: return 3; default: return 1; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c index 5c8b683029ea..fc91293c4560 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -689,6 +689,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset) { + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; int ret = 0; @@ -709,13 +710,15 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - if (!obj->map_list.map) { - ret = drm_gem_create_mmap_offset(obj); + exynos_gem_obj = to_exynos_gem_obj(obj); + + if (!exynos_gem_obj->base.map_list.map) { + ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base); if (ret) goto out; } - *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; + *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); out: diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c index e2147a2ddcec..68ef01028375 100644 --- 
a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c @@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) switch (win_data->pixel_format) { case DRM_FORMAT_NV12MT: tiled_mode = true; - case DRM_FORMAT_NV12: + case DRM_FORMAT_NV12M: crcb_mode = false; buf_num = 2; break; @@ -601,19 +601,17 @@ static void mixer_win_reset(struct mixer_context *ctx) mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); /* setting graphical layers */ + val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ val |= MXR_GRP_CFG_WIN_BLEND_EN; - val |= MXR_GRP_CFG_BLEND_PRE_MUL; - val |= MXR_GRP_CFG_PIXEL_BLEND_EN; val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ /* the same configuration for both layers */ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); - mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); - /* setting video layers */ - val = MXR_GRP_CFG_ALPHA_VAL(0); - mixer_reg_write(res, MXR_VIDEO_CFG, val); + val |= MXR_GRP_CFG_BLEND_PRE_MUL; + val |= MXR_GRP_CFG_PIXEL_BLEND_EN; + mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); /* configuration of Video Processor Registers */ vp_win_reset(ctx); diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index 9fe9ebe52a7a..238a52165833 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -233,7 +233,6 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { @@ -244,7 +243,6 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { @@ -254,7 +252,6 @@ static const struct intel_device_info intel_ivybridge_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { @@ -265,7 +262,6 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_valleyview_m_info = { @@ -293,7 +289,6 @@ static const struct intel_device_info intel_haswell_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_haswell_m_info = { @@ -303,7 +298,6 @@ static const struct intel_device_info intel_haswell_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, - .has_force_wake = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ @@ -1145,9 +1139,10 @@ MODULE_LICENSE("GPL and additional rights"); /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(dev_priv, reg) \ - ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) + (((dev_priv)->info->gen >= 6) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) && \ + (!IS_VALLEYVIEW((dev_priv)->dev)) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index b0b676abde0d..c9cfc67c2cf5 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -285,7 +285,6 @@ struct intel_device_info { u8 is_ivybridge:1; u8 is_valleyview:1; u8 
has_pch_split:1; - u8 has_force_wake:1; u8 is_haswell:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; @@ -1102,8 +1101,6 @@ struct drm_i915_file_private { #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) -#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) - #include "i915_trace.h" /** diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index b1fe0edda955..1417660a93ec 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -510,7 +510,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) return ret; } -static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; @@ -550,35 +550,6 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); } -static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int pipe; - - if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) - DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", - (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> - SDE_AUDIO_POWER_SHIFT_CPT); - - if (pch_iir & SDE_AUX_MASK_CPT) - DRM_DEBUG_DRIVER("AUX channel interrupt\n"); - - if (pch_iir & SDE_GMBUS_CPT) - DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); - - if (pch_iir & SDE_AUDIO_CP_REQ_CPT) - DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); - - if (pch_iir & SDE_AUDIO_CP_CHG_CPT) - DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); - - if (pch_iir & SDE_FDI_MASK_CPT) - for_each_pipe(pipe) - DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", - pipe_name(pipe), - I915_READ(FDI_RX_IIR(pipe))); -} - static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -620,7 +591,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) if (pch_iir & SDE_HOTPLUG_MASK_CPT) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - cpt_irq_handler(dev, pch_iir); + pch_irq_handler(dev, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -713,10 +684,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); - else - ibx_irq_handler(dev, pch_iir); + pch_irq_handler(dev, pch_iir); } if (de_iir & DE_PCU_EVENT) { diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 48d5e8e051cf..2d49b9507ed0 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -210,14 +210,6 @@ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) -/* IVB has funny definitions for which plane to flip. 
*/ -#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) -#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) -#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) - #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) @@ -3321,7 +3313,7 @@ /* PCH */ -/* south display engine interrupt: IBX */ +/* south display engine interrupt */ #define SDE_AUDIO_POWER_D (1 << 27) #define SDE_AUDIO_POWER_C (1 << 26) #define SDE_AUDIO_POWER_B (1 << 25) @@ -3357,44 +3349,15 @@ #define SDE_TRANSA_CRC_ERR (1 << 1) #define SDE_TRANSA_FIFO_UNDER (1 << 0) #define SDE_TRANS_MASK (0x3f) - -/* south display engine interrupt: CPT/PPT */ -#define SDE_AUDIO_POWER_D_CPT (1 << 31) -#define SDE_AUDIO_POWER_C_CPT (1 << 30) -#define SDE_AUDIO_POWER_B_CPT (1 << 29) -#define SDE_AUDIO_POWER_SHIFT_CPT 29 -#define SDE_AUDIO_POWER_MASK_CPT (7 << 29) -#define SDE_AUXD_CPT (1 << 27) -#define SDE_AUXC_CPT (1 << 26) -#define SDE_AUXB_CPT (1 << 25) -#define SDE_AUX_MASK_CPT (7 << 25) +/* CPT */ +#define SDE_CRT_HOTPLUG_CPT (1 << 19) #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) -#define SDE_CRT_HOTPLUG_CPT (1 << 19) #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ SDE_PORTD_HOTPLUG_CPT | \ SDE_PORTC_HOTPLUG_CPT | \ SDE_PORTB_HOTPLUG_CPT) -#define SDE_GMBUS_CPT (1 << 17) -#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) -#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) -#define SDE_FDI_RXC_CPT (1 << 8) -#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) -#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) -#define SDE_FDI_RXB_CPT (1 << 4) -#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) -#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) -#define SDE_FDI_RXA_CPT (1 << 0) -#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ - SDE_AUDIO_CP_REQ_B_CPT | \ - SDE_AUDIO_CP_REQ_A_CPT) -#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ - SDE_AUDIO_CP_CHG_B_CPT | \ - SDE_AUDIO_CP_CHG_A_CPT) -#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ - SDE_FDI_RXB_CPT | \ - SDE_FDI_RXA_CPT) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index e0aa064def31..914789420906 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -6158,34 +6158,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; - uint32_t plane_bit = 0; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) goto err; - switch(intel_crtc->plane) { - case PLANE_A: - plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; - break; - case PLANE_B: - plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; - break; - case PLANE_C: - plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; - break; - default: - WARN_ONCE(1, "unknown plane in flip command\n"); - ret = -ENODEV; - goto err; - } - ret = intel_ring_begin(ring, 4); if (ret) goto err_unpin; - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (obj->gtt_offset)); intel_ring_emit(ring, (MI_NOOP)); diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c 
b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index e5b84ff89ca5..b59b6d5b7583 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -266,15 +266,10 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_object *obj = ring->obj; - int ret = 0; u32 head; - if (HAS_FORCE_WAKE(dev)) - gen6_gt_force_wake_get(dev_priv); - /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); @@ -322,8 +317,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); - ret = -EIO; - goto out; + return -EIO; } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) @@ -332,14 +326,9 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->head = I915_READ_HEAD(ring); ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring_space(ring); - ring->last_retired_head = -1; } -out: - if (HAS_FORCE_WAKE(dev)) - gen6_gt_force_wake_put(dev_priv); - - return ret; + return 0; } static int @@ -998,10 +987,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto err_unref; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto err_unpin; - ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, ring->size); if (ring->virtual_start == NULL) { diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c index b18870c455ad..55ab284e22f2 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/cm.c +++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c @@ -1593,10 +1593,6 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, struct net_device *pdev; pdev = ip_dev_find(&init_net, peer_ip); - if (!pdev) { - err = -ENODEV; - goto out; - } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, 0); if (!ep->l2t) diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index 3530c41fcd1f..ee1c577238f7 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_mr_size = ~0ull; props->page_size_cap = dev->dev->caps.page_size_cap; props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; - props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; + props->max_qp_wr = dev->dev->caps.max_wqes; props->max_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; @@ -1084,9 +1084,12 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) int total_eqs = 0; int i, j, eq; - /* Legacy mode or comp_pool is not large enough */ - if (dev->caps.comp_pool == 0 || - dev->caps.num_ports > dev->caps.comp_pool) + /* Init eq table */ + ibdev->eq_table = NULL; + ibdev->eq_added = 0; + + /* Legacy mode? 
*/ + if (dev->caps.comp_pool == 0) return; eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ @@ -1132,10 +1135,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; - - /* no additional eqs were added */ - if (!ibdev->eq_table) - return; + int total_eqs; /* Reset the advertised EQ number */ ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; @@ -1148,7 +1148,12 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) mlx4_release_eq(dev, ibdev->eq_table[i]); } + total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; + memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); kfree(ibdev->eq_table); + + ibdev->eq_table = NULL; + ibdev->eq_added = 0; } static void *mlx4_ib_add(struct mlx4_dev *dev) diff --git a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h index ff36655d23d3..e62297cc77cc 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -44,14 +44,6 @@ #include #include -enum { - MLX4_IB_SQ_MIN_WQE_SHIFT = 6, - MLX4_IB_MAX_HEADROOM = 2048 -}; - -#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1) -#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)) - struct mlx4_ib_ucontext { struct ib_ucontext ibucontext; struct mlx4_uar uar; diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index 8d4ed24aef93..ceb33327091a 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) { /* Sanity check RQ size before proceeding */ - if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || - cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) + if (cap->max_recv_wr > dev->dev->caps.max_wqes || + cap->max_recv_sge > dev->dev->caps.max_rq_sg) return -EINVAL; if (!has_rq) { @@ -329,17 +329,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } - /* leave userspace return values as they were, so as not to break ABI */ - if (is_user) { - cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; - cap->max_recv_sge = qp->rq.max_gs; - } else { - cap->max_recv_wr = qp->rq.max_post = - min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); - cap->max_recv_sge = min(qp->rq.max_gs, - min(dev->dev->caps.max_sq_sg, - dev->dev->caps.max_rq_sg)); - } + cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; + cap->max_recv_sge = qp->rq.max_gs; return 0; } @@ -350,8 +341,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int s; /* Sanity check SQ size before proceeding */ - if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || - cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || + if (cap->max_send_wr > dev->dev->caps.max_wqes || + cap->max_send_sge > dev->dev->caps.max_sq_sg || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma.h b/trunk/drivers/infiniband/hw/ocrdma/ocrdma.h index 037f5cea85bd..85a69c958559 100644 --- 
a/trunk/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -231,6 +231,7 @@ struct ocrdma_qp_hwq_info { u32 entry_size; u32 max_cnt; u32 max_wqe_idx; + u32 free_delta; u16 dbid; /* qid, where to ring the doorbell. */ u32 len; dma_addr_t pa; diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index 517ab20b727c..a411a4e3193d 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_abi.h @@ -101,6 +101,8 @@ struct ocrdma_create_qp_uresp { u32 rsvd1; u32 num_wqe_allocated; u32 num_rqe_allocated; + u32 free_wqe_delta; + u32 free_rqe_delta; u32 db_sq_offset; u32 db_rq_offset; u32 db_shift; @@ -124,7 +126,8 @@ struct ocrdma_create_srq_uresp { u32 db_rq_offset; u32 db_shift; - u64 rsvd2; + u32 free_rqe_delta; + u32 rsvd2; u64 rsvd3; } __packed; diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 9343a1522977..9b204b1ba336 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, break; case OCRDMA_SRQ_LIMIT_EVENT: ib_evt.element.srq = &qp->srq->ibsrq; - ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; + ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; srq_event = 1; qp_event = 0; break; @@ -1990,12 +1990,19 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, max_wqe_allocated = 1 << max_wqe_allocated; max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); + if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { + qp->sq.free_delta = 0; + qp->rq.free_delta = 1; + } else + qp->sq.free_delta = 1; + qp->sq.max_cnt = max_wqe_allocated; qp->sq.max_wqe_idx = max_wqe_allocated - 1; if (!attrs->srq) { qp->rq.max_cnt = max_rqe_allocated; qp->rq.max_wqe_idx = max_rqe_allocated - 1; + qp->rq.free_delta = 1; } } diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 04fef3de6d75..a20d16eaae71 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -26,6 +26,7 @@ *******************************************************************/ #include +#include #include #include #include diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index d16d172b6b6b..e9f74d1b48f6 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -940,6 +940,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; uresp.db_shift = 16; } + uresp.free_wqe_delta = qp->sq.free_delta; + uresp.free_rqe_delta = qp->rq.free_delta; if (qp->dpp_enabled) { uresp.dpp_credit = dpp_credit_lmt; @@ -1305,6 +1307,8 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) free_cnt = (q->max_cnt - q->head) + q->tail; else free_cnt = q->tail - q->head; + if (q->free_delta) + free_cnt -= q->free_delta; return free_cnt; } @@ -1497,6 +1501,7 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) (srq->pd->id * srq->dev->nic_info.db_page_size); uresp.db_page_size = srq->dev->nic_info.db_page_size; uresp.num_rqe_allocated = srq->rq.max_cnt; + uresp.free_rqe_delta = 1; if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; 
uresp.db_shift = 24; diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index 633f03d80274..e6483439f25f 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -28,6 +28,7 @@ #ifndef __OCRDMA_VERBS_H__ #define __OCRDMA_VERBS_H__ +#include int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **bad_wr); int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, diff --git a/trunk/drivers/iommu/amd_iommu.c b/trunk/drivers/iommu/amd_iommu.c index a2e418cba0ff..d90a421e9cac 100644 --- a/trunk/drivers/iommu/amd_iommu.c +++ b/trunk/drivers/iommu/amd_iommu.c @@ -547,12 +547,26 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } -static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) +static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) { struct amd_iommu_fault fault; + volatile u64 *raw; + int i; INC_STATS_COUNTER(pri_requests); + raw = (u64 *)(iommu->ppr_log + head); + + /* + * Hardware bug: Interrupt may arrive before the entry is written to + * memory. If this happens we need to wait for the entry to arrive. + */ + for (i = 0; i < LOOP_TIMEOUT; ++i) { + if (PPR_REQ_TYPE(raw[0]) != 0) + break; + udelay(1); + } + if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); return; @@ -564,6 +578,12 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) fault.tag = PPR_TAG(raw[0]); fault.flags = PPR_FLAGS(raw[0]); + /* + * To detect the hardware bug we need to clear the entry + * to back to zero. + */ + raw[0] = raw[1] = 0; + atomic_notifier_call_chain(&ppr_notifier, 0, &fault); } @@ -575,62 +595,25 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) if (iommu->ppr_log == NULL) return; - /* enable ppr interrupts again */ - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); - spin_lock_irqsave(&iommu->lock, flags); head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); while (head != tail) { - volatile u64 *raw; - u64 entry[2]; - int i; - - raw = (u64 *)(iommu->ppr_log + head); - - /* - * Hardware bug: Interrupt may arrive before the entry is - * written to memory. If this happens we need to wait for the - * entry to arrive. - */ - for (i = 0; i < LOOP_TIMEOUT; ++i) { - if (PPR_REQ_TYPE(raw[0]) != 0) - break; - udelay(1); - } - - /* Avoid memcpy function-call overhead */ - entry[0] = raw[0]; - entry[1] = raw[1]; - /* - * To detect the hardware bug we need to clear the entry - * back to zero. 
- */ - raw[0] = raw[1] = 0UL; + /* Handle PPR entry */ + iommu_handle_ppr_entry(iommu, head); - /* Update head pointer of hardware ring-buffer */ + /* Update and refresh ring-buffer state*/ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - - /* - * Release iommu->lock because ppr-handling might need to - * re-aquire it - */ - spin_unlock_irqrestore(&iommu->lock, flags); - - /* Handle PPR entry */ - iommu_handle_ppr_entry(iommu, entry); - - spin_lock_irqsave(&iommu->lock, flags); - - /* Refresh ring-buffer information */ - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); } + /* enable ppr interrupts again */ + writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); + spin_unlock_irqrestore(&iommu->lock, flags); } diff --git a/trunk/drivers/iommu/amd_iommu_init.c b/trunk/drivers/iommu/amd_iommu_init.c index 542024ba6dba..c56790375e0f 100644 --- a/trunk/drivers/iommu/amd_iommu_init.c +++ b/trunk/drivers/iommu/amd_iommu_init.c @@ -1029,9 +1029,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (!iommu->dev) return 1; - iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, - PCI_DEVFN(0, 0)); - iommu->cap_ptr = h->cap_ptr; iommu->pci_seg = h->pci_seg; iommu->mmio_phys = h->mmio_phys; @@ -1326,16 +1323,20 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { int i, j; u32 ioc_feature_control; - struct pci_dev *pdev = iommu->root_pdev; + struct pci_dev *pdev = NULL; /* RD890 BIOSes may not have completely reconfigured the iommu */ - if (!is_rd890_iommu(iommu->dev) || !pdev) + if (!is_rd890_iommu(iommu->dev)) return; /* * First, we need to ensure that the iommu is enabled. 
This is * controlled by a register in the northbridge */ + pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); + + if (!pdev) + return; /* Select Northbridge indirect register 0x75 and enable writing */ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); @@ -1345,6 +1346,8 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) if (!(ioc_feature_control & 0x1)) pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); + pci_dev_put(pdev); + /* Restore the iommu BAR */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo); diff --git a/trunk/drivers/iommu/amd_iommu_types.h b/trunk/drivers/iommu/amd_iommu_types.h index 24355559a2ad..2452f3b71736 100644 --- a/trunk/drivers/iommu/amd_iommu_types.h +++ b/trunk/drivers/iommu/amd_iommu_types.h @@ -481,9 +481,6 @@ struct amd_iommu { /* Pointer to PCI device of this IOMMU */ struct pci_dev *dev; - /* Cache pdev to root device for resume quirks */ - struct pci_dev *root_pdev; - /* physical address of MMIO space */ u64 mmio_phys; /* virtual address of MMIO space */ diff --git a/trunk/drivers/md/raid1.c b/trunk/drivers/md/raid1.c index a9c7981ddd24..835de7168cd3 100644 --- a/trunk/drivers/md/raid1.c +++ b/trunk/drivers/md/raid1.c @@ -2550,7 +2550,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -EINVAL; spin_lock_init(&conf->device_lock); rdev_for_each(rdev, mddev) { - struct request_queue *q; int disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) @@ -2563,9 +2562,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev) goto abort; disk->rdev = rdev; - q = bdev_get_queue(rdev->bdev); - if (q->merge_bvec_fn) - mddev->merge_check_needed = 1; disk->head_position = 0; } diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index 99ae6068e456..987db37cb875 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -3475,7 +3475,6 @@ static int run(struct mddev *mddev) rdev_for_each(rdev, mddev) { long long diff; - struct request_queue *q; disk_idx = rdev->raid_disk; if (disk_idx < 0) @@ -3494,9 +3493,6 @@ static int run(struct mddev *mddev) goto out_free_conf; disk->rdev = rdev; } - q = bdev_get_queue(rdev->bdev); - if (q->merge_bvec_fn) - mddev->merge_check_needed = 1; diff = (rdev->new_data_offset - rdev->data_offset); if (!mddev->reshape_backwards) diff = -diff; diff --git a/trunk/drivers/mtd/ubi/debug.c b/trunk/drivers/mtd/ubi/debug.c index 09d4f8d9d592..9f957c2d48e9 100644 --- a/trunk/drivers/mtd/ubi/debug.c +++ b/trunk/drivers/mtd/ubi/debug.c @@ -264,9 +264,6 @@ static struct dentry *dfs_rootdir; */ int ubi_debugfs_init(void) { - if (!IS_ENABLED(DEBUG_FS)) - return 0; - dfs_rootdir = debugfs_create_dir("ubi", NULL); if (IS_ERR_OR_NULL(dfs_rootdir)) { int err = dfs_rootdir ? 
-ENODEV : PTR_ERR(dfs_rootdir); @@ -284,8 +281,7 @@ int ubi_debugfs_init(void) */ void ubi_debugfs_exit(void) { - if (IS_ENABLED(DEBUG_FS)) - debugfs_remove(dfs_rootdir); + debugfs_remove(dfs_rootdir); } /* Read an UBI debugfs file */ @@ -407,9 +403,6 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) struct dentry *dent; struct ubi_debug_info *d = ubi->dbg; - if (!IS_ENABLED(DEBUG_FS)) - return 0; - n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, ubi->ubi_num); if (n == UBI_DFS_DIR_LEN) { @@ -477,6 +470,5 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) */ void ubi_debugfs_exit_dev(struct ubi_device *ubi) { - if (IS_ENABLED(DEBUG_FS)) - debugfs_remove_recursive(ubi->dbg->dfs_dir); + debugfs_remove_recursive(ubi->dbg->dfs_dir); } diff --git a/trunk/drivers/mtd/ubi/wl.c b/trunk/drivers/mtd/ubi/wl.c index b6be644e7b85..9df100a4ec38 100644 --- a/trunk/drivers/mtd/ubi/wl.c +++ b/trunk/drivers/mtd/ubi/wl.c @@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) dbg_wl("flush pending work for LEB %d:%d (%d pending works)", vol_id, lnum, ubi->works_count); + down_write(&ubi->work_sem); while (found) { struct ubi_work *wrk; found = 0; - down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); list_for_each_entry(wrk, &ubi->works, list) { if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && @@ -1277,27 +1277,18 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) spin_unlock(&ubi->wl_lock); err = wrk->func(ubi, wrk, 0); - if (err) { - up_read(&ubi->work_sem); - return err; - } - + if (err) + goto out; spin_lock(&ubi->wl_lock); found = 1; break; } } spin_unlock(&ubi->wl_lock); - up_read(&ubi->work_sem); } - /* - * Make sure all the works which have been done in parallel are - * finished. - */ - down_write(&ubi->work_sem); +out: up_write(&ubi->work_sem); - return err; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c index a8fb52992c64..1fe2c7a8b40c 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c @@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, if (slave != dev->caps.function) memset(inbox->buf, 0, 256); if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { - *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; + *(u8 *) inbox->buf = !!reset_qkey_viols << 6; ((__be32 *) inbox->buf)[2] = agg_cap_mask; } else { - ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; + ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; ((__be32 *) inbox->buf)[1] = agg_cap_mask; } diff --git a/trunk/drivers/platform/x86/acerhdf.c b/trunk/drivers/platform/x86/acerhdf.c index 2fd9d36acd15..639db4d0aa76 100644 --- a/trunk/drivers/platform/x86/acerhdf.c +++ b/trunk/drivers/platform/x86/acerhdf.c @@ -5,7 +5,7 @@ * * (C) 2009 - Peter Feuerer peter (a) piie.net * http://piie.net - * 2009 Borislav Petkov bp (a) alien8.de + * 2009 Borislav Petkov * * Inspired by and many thanks to: * o acerfand - Rachel Greenham diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index 40469044088d..85c9e2bff8e6 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -683,8 +683,6 @@ EXPORT_SYMBOL(dget_parent); /** * d_find_alias - grab a hashed alias of inode * @inode: inode in question - * @want_discon: flag, used by d_splice_alias, to request - * that only a DISCONNECTED alias be returned. * * If inode has a hashed alias, or is a directory and has any alias, * acquire the reference to alias and return it. Otherwise return NULL. 
@@ -693,10 +691,9 @@ EXPORT_SYMBOL(dget_parent); * of a filesystem. * * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer - * any other hashed alias over that one unless @want_discon is set, - * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. + * any other hashed alias over that. */ -static struct dentry *__d_find_alias(struct inode *inode, int want_discon) +static struct dentry *__d_find_alias(struct inode *inode) { struct dentry *alias, *discon_alias; @@ -708,7 +705,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon) if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; - } else if (!want_discon) { + } else { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; @@ -739,7 +736,7 @@ struct dentry *d_find_alias(struct inode *inode) if (!list_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); - de = __d_find_alias(inode, 0); + de = __d_find_alias(inode); spin_unlock(&inode->i_lock); } return de; @@ -1650,9 +1647,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) if (inode && S_ISDIR(inode->i_mode)) { spin_lock(&inode->i_lock); - new = __d_find_alias(inode, 1); + new = __d_find_any_alias(inode); if (new) { - BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&inode->i_lock); security_d_instantiate(new, inode); d_move(new, dentry); @@ -2482,7 +2478,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) struct dentry *alias; /* Does an aliased dentry already exist? */ - alias = __d_find_alias(inode, 0); + alias = __d_find_alias(inode); if (alias) { actual = alias; write_seqlock(&rename_lock); diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c index cee7812cc3cf..99b6324290db 100644 --- a/trunk/fs/ext4/balloc.c +++ b/trunk/fs/ext4/balloc.c @@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, * unusual file system layouts. 
*/ if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { - block_cluster = EXT4_B2C(sbi, - ext4_block_bitmap(sb, gdp) - start); + block_cluster = EXT4_B2C(sbi, (start - + ext4_block_bitmap(sb, gdp))); if (block_cluster < num_clusters) block_cluster = -1; else if (block_cluster == num_clusters) { @@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { inode_cluster = EXT4_B2C(sbi, - ext4_inode_bitmap(sb, gdp) - start); + start - ext4_inode_bitmap(sb, gdp)); if (inode_cluster < num_clusters) inode_cluster = -1; else if (inode_cluster == num_clusters) { @@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, itbl_blk = ext4_inode_table(sb, gdp); for (i = 0; i < sbi->s_itb_per_group; i++) { if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { - c = EXT4_B2C(sbi, itbl_blk + i - start); + c = EXT4_B2C(sbi, start - itbl_blk + i); if ((c < num_clusters) || (c == inode_cluster) || (c == block_cluster) || (c == itbl_cluster)) continue; diff --git a/trunk/fs/ext4/ioctl.c b/trunk/fs/ext4/ioctl.c index e34deac3f366..8ad112ae0ade 100644 --- a/trunk/fs/ext4/ioctl.c +++ b/trunk/fs/ext4/ioctl.c @@ -123,6 +123,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) else ext4_clear_inode_flag(inode, i); } + ei->i_flags = flags; ext4_set_inode_flags(inode); inode->i_ctime = ext4_current_time(inode); diff --git a/trunk/fs/ubifs/debug.c b/trunk/fs/ubifs/debug.c index 84a7e6f3c046..685a83756b2b 100644 --- a/trunk/fs/ubifs/debug.c +++ b/trunk/fs/ubifs/debug.c @@ -2918,9 +2918,6 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) struct dentry *dent; struct ubifs_debug_info *d = c->dbg; - if (!IS_ENABLED(DEBUG_FS)) - return 0; - n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, c->vi.ubi_num, c->vi.vol_id); if (n == UBIFS_DFS_DIR_LEN) { @@ -3013,8 +3010,7 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) */ void dbg_debugfs_exit_fs(struct ubifs_info *c) { - if (IS_ENABLED(DEBUG_FS)) - debugfs_remove_recursive(c->dbg->dfs_dir); + debugfs_remove_recursive(c->dbg->dfs_dir); } struct ubifs_global_debug_info ubifs_dbg; @@ -3099,9 +3095,6 @@ int dbg_debugfs_init(void) const char *fname; struct dentry *dent; - if (!IS_ENABLED(DEBUG_FS)) - return 0; - fname = "ubifs"; dent = debugfs_create_dir(fname, NULL); if (IS_ERR_OR_NULL(dent)) @@ -3166,8 +3159,7 @@ int dbg_debugfs_init(void) */ void dbg_debugfs_exit(void) { - if (IS_ENABLED(DEBUG_FS)) - debugfs_remove_recursive(dfs_rootdir); + debugfs_remove_recursive(dfs_rootdir); } /** diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h index 9e6e1c6eb60a..b0d62820ada1 100644 --- a/trunk/include/acpi/acpi_bus.h +++ b/trunk/include/acpi/acpi_bus.h @@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) #else /* CONFIG_ACPI */ -static inline int register_acpi_bus_type(void *bus) { return 0; } -static inline int unregister_acpi_bus_type(void *bus) { return 0; } +static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } +static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } #endif /* CONFIG_ACPI */ diff --git a/trunk/include/drm/exynos_drm.h b/trunk/include/drm/exynos_drm.h index 68733587e700..b6d7ce92eadd 100644 --- a/trunk/include/drm/exynos_drm.h +++ b/trunk/include/drm/exynos_drm.h @@ -64,7 +64,6 @@ struct drm_exynos_gem_map_off { * A structure for mapping buffer. 
* * @handle: a handle to gem object created. - * @pad: just padding to be 64-bit aligned. * @size: memory size to be mapped. * @mapped: having user virtual address mmaped. * - this variable would be filled by exynos gem module @@ -73,8 +72,7 @@ struct drm_exynos_gem_map_off { */ struct drm_exynos_gem_mmap { unsigned int handle; - unsigned int pad; - uint64_t size; + unsigned int size; uint64_t mapped; }; diff --git a/trunk/include/linux/moduleparam.h b/trunk/include/linux/moduleparam.h index d6a58065c09c..1b14d25162cb 100644 --- a/trunk/include/linux/moduleparam.h +++ b/trunk/include/linux/moduleparam.h @@ -128,7 +128,7 @@ struct kparam_array * The ops can have NULL set or get functions. */ #define module_param_cb(name, ops, arg, perm) \ - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) /** * _param_cb - general callback for a module/cmdline parameter @@ -192,7 +192,7 @@ struct kparam_array { (void *)set, (void *)get }; \ __module_param_call(MODULE_PARAM_PREFIX, \ name, &__param_ops_##name, arg, \ - (perm) + sizeof(__check_old_set_param(set))*0, -1) + (perm) + sizeof(__check_old_set_param(set))*0, 0) /* We don't get oldget: it's often a new-style param_get_uint, etc. */ static inline int @@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void) */ #define core_param(name, var, type, perm) \ param_check_##type(name, &(var)); \ - __module_param_call("", name, ¶m_ops_##type, &var, perm, -1) + __module_param_call("", name, ¶m_ops_##type, &var, perm, 0) #endif /* !MODULE */ /** @@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void) = { len, string }; \ __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_ops_string, \ - .str = &__param_string_##name, perm, -1); \ + .str = &__param_string_##name, perm, 0); \ __MODULE_PARM_TYPE(name, "string") /** @@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_array_ops, \ .arr = &__param_arr_##name, \ - perm, -1); \ + perm, 0); \ __MODULE_PARM_TYPE(name, "array of " #type) extern struct kernel_param_ops param_array_ops; diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 45db49f64bb4..f32578634d9d 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -555,8 +555,6 @@ enum perf_event_type { PERF_RECORD_MAX, /* non-ABI */ }; -#define PERF_MAX_STACK_DEPTH 127 - enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, PERF_CONTEXT_KERNEL = (__u64)-128, @@ -611,6 +609,8 @@ struct perf_guest_info_callbacks { #include #include +#define PERF_MAX_STACK_DEPTH 255 + struct perf_callchain_entry { __u64 nr; __u64 ip[PERF_MAX_STACK_DEPTH]; diff --git a/trunk/include/linux/prctl.h b/trunk/include/linux/prctl.h index 3988012255dc..711e0a30aacc 100644 --- a/trunk/include/linux/prctl.h +++ b/trunk/include/linux/prctl.h @@ -127,8 +127,8 @@ #define PR_SET_PTRACER 0x59616d61 # define PR_SET_PTRACER_ANY ((unsigned long)-1) -#define PR_SET_CHILD_SUBREAPER 36 -#define PR_GET_CHILD_SUBREAPER 37 +#define PR_SET_CHILD_SUBREAPER 36 +#define PR_GET_CHILD_SUBREAPER 37 /* * If no_new_privs is set, then operations that grant new privileges (i.e. @@ -142,9 +142,7 @@ * asking selinux for a specific new context (e.g. with runcon) will result * in execve returning -EPERM. 
*/ -#define PR_SET_NO_NEW_PRIVS 38 -#define PR_GET_NO_NEW_PRIVS 39 - -#define PR_GET_TID_ADDRESS 40 +#define PR_SET_NO_NEW_PRIVS 38 +#define PR_GET_NO_NEW_PRIVS 39 #endif /* _LINUX_PRCTL_H */ diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index c688d4cc2e40..6029d8c54476 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -439,7 +439,6 @@ extern int get_dumpable(struct mm_struct *mm); /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ -#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) diff --git a/trunk/init/main.c b/trunk/init/main.c index b5cc0a7c4708..1ca6b32c4828 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void) parse_early_param(); parse_args("Booting kernel", static_command_line, __start___param, __stop___param - __start___param, - -1, -1, &unknown_bootoption); + 0, 0, &unknown_bootoption); jump_label_init(); @@ -755,8 +755,13 @@ static void __init do_initcalls(void) { int level; - for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) + for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { + pr_info("initlevel:%d=%s, %d registered initcalls\n", + level, initcall_level_names[level], + (int) (initcall_levels[level+1] + - initcall_levels[level])); do_initcall_level(level); + } } /* diff --git a/trunk/ipc/shm.c b/trunk/ipc/shm.c index 41c1285d697a..5e2cbfdab6fc 100644 --- a/trunk/ipc/shm.c +++ b/trunk/ipc/shm.c @@ -393,16 +393,6 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } -static long shm_fallocate(struct file *file, int mode, loff_t offset, - loff_t len) -{ - struct shm_file_data *sfd = shm_file_data(file); - - if (!sfd->file->f_op->fallocate) - return -EOPNOTSUPP; - return sfd->file->f_op->fallocate(file, mode, offset, len); -} - static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -420,7 +410,6 @@ static const struct file_operations shm_file_operations = { .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, - .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { @@ -429,7 +418,6 @@ static const struct file_operations shm_file_operations_huge = { .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, - .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index f85c0154b333..5b06cbbf6931 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -3181,6 +3181,7 @@ static void perf_event_for_each(struct perf_event *event, event = event->group_leader; perf_event_for_each_child(event, func); + func(event); list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each_child(sibling, func); mutex_unlock(&ctx->mutex); diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c index d5583f9588e7..b2a2d236f27b 100644 --- a/trunk/kernel/sched/fair.c +++ b/trunk/kernel/sched/fair.c @@ -3632,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /** * update_sg_lb_stats - Update sched_group's statistics 
for load balancing. - * @env: The load balancing environment. + * @sd: The sched_domain whose statistics are to be updated. * @group: sched_group whose statistics are to be updated. * @load_idx: Load index of sched_domain of this_cpu for load calc. * @local_group: Does group contain this_cpu. @@ -3741,10 +3741,11 @@ static inline void update_sg_lb_stats(struct lb_env *env, /** * update_sd_pick_busiest - return 1 on busiest group - * @env: The load balancing environment. + * @sd: sched_domain whose statistics are to be checked * @sds: sched_domain statistics * @sg: sched_group candidate to be checked for being the busiest * @sgs: sched_group statistics + * @this_cpu: the current cpu * * Determine if @sg is a busier group than the previously selected * busiest group. @@ -3782,7 +3783,9 @@ static bool update_sd_pick_busiest(struct lb_env *env, /** * update_sd_lb_stats - Update sched_domain's statistics for load balancing. - * @env: The load balancing environment. + * @sd: sched_domain whose statistics are to be updated. + * @this_cpu: Cpu for which load balance is currently performed. + * @idle: Idle status of this_cpu * @cpus: Set of cpus considered for load balancing. * @balance: Should we balance. * @sds: variable to hold the statistics for this sched_domain. @@ -3871,8 +3874,10 @@ static inline void update_sd_lb_stats(struct lb_env *env, * Returns 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in *imbalance. * - * @env: The load balancing environment. + * @sd: The sched_domain whose packing is to be checked. * @sds: Statistics of the sched_domain which is to be packed + * @this_cpu: The cpu at whose sched_domain we're performing load-balance. + * @imbalance: returns amount of imbalanced due to packing. */ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) { @@ -3898,8 +3903,9 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) * fix_small_imbalance - Calculate the minor imbalance that exists * amongst the groups of a sched_domain, during * load balancing. - * @env: The load balancing environment. * @sds: Statistics of the sched_domain whose imbalance is to be calculated. + * @this_cpu: The cpu at whose sched_domain we're performing load-balance. + * @imbalance: Variable to store the imbalance. */ static inline void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) @@ -4042,7 +4048,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * Also calculates the amount of weighted load which should be moved * to restore balance. * - * @env: The load balancing environment. + * @sd: The sched_domain whose busiest group is to be returned. + * @this_cpu: The cpu for which load balancing is currently being performed. + * @imbalance: Variable which stores amount of weighted load which should + * be moved to restore balance/put a group to idle. + * @idle: The idle status of this_cpu. * @cpus: The set of CPUs under consideration for load-balancing. * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. 
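The kernel/sched/fair.c hunks just above only rewrite kernel-doc parameter lines, swapping a single "@env: The load balancing environment." entry for the older per-parameter entries (@sd, @this_cpu, @imbalance, @idle). For readers unfamiliar with that comment convention, a minimal sketch of the format being edited follows; the struct and function names are invented for illustration and do not exist in the kernel tree.

/* Invented stand-ins so the sketch is self-contained. */
struct lb_env { int dst_cpu; unsigned long imbalance; };

/**
 * demo_update_stats - sketch of the kernel-doc layout used above (hypothetical)
 * @env:   the load balancing environment
 * @local: non-zero if the group contains the destination cpu
 *
 * Each parameter gets one "@name:" line directly under the summary line,
 * and a blank comment line separates those from the long description.
 * The hunks above merely move between documenting the packed @env
 * argument and documenting the unpacked arguments individually.
 */
static int demo_update_stats(struct lb_env *env, int local)
{
	if (local)
		env->imbalance = 0;	/* nothing to move within the local group */
	return env->dst_cpu;
}

scripts/kernel-doc parses exactly this shape, which is why the hunks above touch only the "@...:" comment lines and leave the code itself unchanged.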
diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index f0ec44dcd415..9ff89cb9657a 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -1786,13 +1786,27 @@ SYSCALL_DEFINE1(umask, int, mask) } #ifdef CONFIG_CHECKPOINT_RESTORE +static bool vma_flags_mismatch(struct vm_area_struct *vma, + unsigned long required, + unsigned long banned) +{ + return (vma->vm_flags & required) != required || + (vma->vm_flags & banned); +} + static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) { - struct vm_area_struct *vma; struct file *exe_file; struct dentry *dentry; int err; + /* + * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's + * remain. So perform a quick test first. + */ + if (mm->num_exe_file_vmas) + return -EBUSY; + exe_file = fget(fd); if (!exe_file) return -EBADF; @@ -1813,30 +1827,17 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) if (err) goto exit; - down_write(&mm->mmap_sem); - - /* - * Forbid mm->exe_file change if there are mapped other files. - */ - err = -EBUSY; - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (vma->vm_file && !path_equal(&vma->vm_file->f_path, - &exe_file->f_path)) - goto exit_unlock; - } - /* * The symlink can be changed only once, just to disallow arbitrary * transitions malicious software might bring in. This means one * could make a snapshot over all processes running and monitor * /proc/pid/exe changes to notice unusual activity if needed. */ - err = -EPERM; - if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) - goto exit_unlock; - - set_mm_exe_file(mm, exe_file); -exit_unlock: + down_write(&mm->mmap_sem); + if (likely(!mm->exe_file)) + set_mm_exe_file(mm, exe_file); + else + err = -EBUSY; up_write(&mm->mmap_sem); exit: @@ -1861,7 +1862,7 @@ static int prctl_set_mm(int opt, unsigned long addr, if (opt == PR_SET_MM_EXE_FILE) return prctl_set_mm_exe_file(mm, (unsigned int)addr); - if (addr >= TASK_SIZE || addr < mmap_min_addr) + if (addr >= TASK_SIZE) return -EINVAL; error = -EINVAL; @@ -1923,6 +1924,12 @@ static int prctl_set_mm(int opt, unsigned long addr, error = -EFAULT; goto out; } +#ifdef CONFIG_STACK_GROWSUP + if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0)) +#else + if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0)) +#endif + goto out; if (opt == PR_SET_MM_START_STACK) mm->start_stack = addr; else if (opt == PR_SET_MM_ARG_START) @@ -1974,22 +1981,12 @@ static int prctl_set_mm(int opt, unsigned long addr, up_read(&mm->mmap_sem); return error; } - -static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) -{ - return put_user(me->clear_child_tid, tid_addr); -} - #else /* CONFIG_CHECKPOINT_RESTORE */ static int prctl_set_mm(int opt, unsigned long addr, unsigned long arg4, unsigned long arg5) { return -EINVAL; } -static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) -{ - return -EINVAL; -} #endif SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, @@ -2127,9 +2124,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, else return -EINVAL; break; - case PR_GET_TID_ADDRESS: - error = prctl_get_tid_address(me, (int __user **)arg2); - break; default: return -EINVAL; } diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index 6f46a00a1e8a..6e46cacf5969 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -962,7 +962,6 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) 
timekeeper.xtime.tv_sec++; leap = second_overflow(timekeeper.xtime.tv_sec); timekeeper.xtime.tv_sec += leap; - timekeeper.wall_to_monotonic.tv_sec -= leap; } /* Accumulate raw time */ @@ -1078,7 +1077,6 @@ static void update_wall_time(void) timekeeper.xtime.tv_sec++; leap = second_overflow(timekeeper.xtime.tv_sec); timekeeper.xtime.tv_sec += leap; - timekeeper.wall_to_monotonic.tv_sec -= leap; } timekeeping_update(false); diff --git a/trunk/lib/btree.c b/trunk/lib/btree.c index f9a484676cb6..e5ec1e9c1aa5 100644 --- a/trunk/lib/btree.c +++ b/trunk/lib/btree.c @@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, if (head->height == 0) return NULL; - longcpy(key, __key, geo->keylen); retry: + longcpy(key, __key, geo->keylen); dec_key(geo, key); node = head->node; @@ -351,7 +351,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, } miss: if (retry_key) { - longcpy(key, retry_key, geo->keylen); + __key = retry_key; retry_key = NULL; goto retry; } @@ -509,7 +509,6 @@ static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, int btree_insert(struct btree_head *head, struct btree_geo *geo, unsigned long *key, void *val, gfp_t gfp) { - BUG_ON(!val); return btree_insert_level(head, geo, key, val, 1, gfp); } EXPORT_SYMBOL_GPL(btree_insert); diff --git a/trunk/lib/raid6/recov.c b/trunk/lib/raid6/recov.c index a95bccb8497d..1805a5cc5daa 100644 --- a/trunk/lib/raid6/recov.c +++ b/trunk/lib/raid6/recov.c @@ -22,8 +22,8 @@ #include /* Recover two failed data blocks. */ -static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, - int failb, void **ptrs) +void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, + void **ptrs) { u8 *p, *q, *dp, *dq; u8 px, qx, db; @@ -66,8 +66,7 @@ static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, } /* Recover failure of one data block plus the P block */ -static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, - void **ptrs) +void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) { u8 *p, *q, *dq; const u8 *qmul; /* Q multiplier table */ diff --git a/trunk/lib/raid6/recov_ssse3.c b/trunk/lib/raid6/recov_ssse3.c index ecb710c0b4d9..37ae61930559 100644 --- a/trunk/lib/raid6/recov_ssse3.c +++ b/trunk/lib/raid6/recov_ssse3.c @@ -19,8 +19,8 @@ static int raid6_has_ssse3(void) boot_cpu_has(X86_FEATURE_SSSE3); } -static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, - int failb, void **ptrs) +void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, + void **ptrs) { u8 *p, *q, *dp, *dq; const u8 *pbmul; /* P multiplier table for B data */ @@ -194,8 +194,7 @@ static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, } -static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, - void **ptrs) +void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) { u8 *p, *q, *dq; const u8 *qmul; /* Q multiplier table */ diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c index a15a466d0d1d..585bd220a21e 100644 --- a/trunk/mm/shmem.c +++ b/trunk/mm/shmem.c @@ -683,21 +683,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, mutex_lock(&shmem_swaplist_mutex); /* * We needed to drop mutex to make that restrictive page - * allocation, but the inode might have been freed while we - * dropped it: although a racing shmem_evict_inode() cannot - * complete without emptying the radix_tree, our page lock - * on this swapcache page is not enough 
to prevent that - - * free_swap_and_cache() of our swap entry will only - * trylock_page(), removing swap from radix_tree whatever. - * - * We must not proceed to shmem_add_to_page_cache() if the - * inode has been freed, but of course we cannot rely on - * inode or mapping or info to check that. However, we can - * safely check if our swap entry is still in use (and here - * it can't have got reused for another page): if it's still - * in use, then the inode cannot have been freed yet, and we - * can safely proceed (if it's no longer in use, that tells - * nothing about the inode, but we don't need to unuse swap). + * allocation; but the inode might already be freed by now, + * and we cannot refer to inode or mapping or info to check. + * However, we do hold page lock on the PageSwapCache page, + * so can check if that still has our reference remaining. */ if (!page_swapcount(*pagep)) error = -ENOENT; @@ -741,9 +730,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page) /* * There's a faint possibility that swap page was replaced before - * caller locked it: caller will come back later with the right page. + * caller locked it: it will come back later with the right page. */ - if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) + if (unlikely(!PageSwapCache(page))) goto out; /* @@ -1006,15 +995,21 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, newpage = shmem_alloc_page(gfp, info, index); if (!newpage) return -ENOMEM; + VM_BUG_ON(shmem_should_replace_page(newpage, gfp)); + *pagep = newpage; page_cache_get(newpage); copy_highpage(newpage, oldpage); - flush_dcache_page(newpage); + VM_BUG_ON(!PageLocked(oldpage)); __set_page_locked(newpage); + VM_BUG_ON(!PageUptodate(oldpage)); SetPageUptodate(newpage); + VM_BUG_ON(!PageSwapBacked(oldpage)); SetPageSwapBacked(newpage); + VM_BUG_ON(!swap_index); set_page_private(newpage, swap_index); + VM_BUG_ON(!PageSwapCache(oldpage)); SetPageSwapCache(newpage); /* @@ -1024,24 +1019,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, spin_lock_irq(&swap_mapping->tree_lock); error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, newpage); - if (!error) { - __inc_zone_page_state(newpage, NR_FILE_PAGES); - __dec_zone_page_state(oldpage, NR_FILE_PAGES); - } + __inc_zone_page_state(newpage, NR_FILE_PAGES); + __dec_zone_page_state(oldpage, NR_FILE_PAGES); spin_unlock_irq(&swap_mapping->tree_lock); + BUG_ON(error); - if (unlikely(error)) { - /* - * Is this possible? I think not, now that our callers check - * both PageSwapCache and page_private after getting page lock; - * but be defensive. Reverse old to newpage for clear and free. 
- */ - oldpage = newpage; - } else { - mem_cgroup_replace_page_cache(oldpage, newpage); - lru_cache_add_anon(newpage); - *pagep = newpage; - } + mem_cgroup_replace_page_cache(oldpage, newpage); + lru_cache_add_anon(newpage); ClearPageSwapCache(oldpage); set_page_private(oldpage, 0); @@ -1049,7 +1033,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, unlock_page(oldpage); page_cache_release(oldpage); page_cache_release(oldpage); - return error; + return 0; } /* @@ -1123,8 +1107,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, /* We have to do this with page locked to prevent races */ lock_page(page); - if (!PageSwapCache(page) || page_private(page) != swap.val || - page->mapping) { + if (!PageSwapCache(page) || page->mapping) { error = -EEXIST; /* try again */ goto failed; } diff --git a/trunk/tools/perf/MANIFEST b/trunk/tools/perf/MANIFEST index b4b572e8c100..5476bc0a1eac 100644 --- a/trunk/tools/perf/MANIFEST +++ b/trunk/tools/perf/MANIFEST @@ -1,6 +1,4 @@ tools/perf -tools/scripts -tools/lib/traceevent include/linux/const.h include/linux/perf_event.h include/linux/rbtree.h diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c index 25249f76329d..8c767c6bca91 100644 --- a/trunk/tools/perf/builtin-report.c +++ b/trunk/tools/perf/builtin-report.c @@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, if (symbol_conf.use_callchain) { err = callchain_append(he->callchain, - &callchain_cursor, + &evsel->hists.callchain_cursor, sample->period); if (err) return err; @@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, * so we don't allocated the extra space needed because the stdio * code will not use it. */ - if (he->ms.sym != NULL && use_browser > 0) { + if (al->sym != NULL && use_browser > 0) { struct annotation *notes = symbol__annotation(he->ms.sym); assert(evsel != NULL); diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index 262589991ea4..62ae30d34fa6 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -1129,7 +1129,7 @@ static int add_default_attributes(void) return 0; if (!evsel_list->nr_entries) { - if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) + if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) return -1; } @@ -1139,21 +1139,21 @@ static int add_default_attributes(void) return 0; /* Append detailed run extra attributes: */ - if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) + if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) return -1; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ - if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) + if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) return -1; if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ - return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); + return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); } int cmd_stat(int argc, const char **argv, const char *prefix __used) diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index 6bb0277b7dfe..871b540293e1 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, 
&callchain_cursor, + err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, sample->period); if (err) return; diff --git a/trunk/tools/perf/design.txt b/trunk/tools/perf/design.txt index 67e5d0cace85..bd0bb1b1279b 100644 --- a/trunk/tools/perf/design.txt +++ b/trunk/tools/perf/design.txt @@ -409,15 +409,14 @@ Counters can be enabled and disabled in two ways: via ioctl and via prctl. When a counter is disabled, it doesn't count or generate events but does continue to exist and maintain its count value. -An individual counter can be enabled with +An individual counter or counter group can be enabled with - ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); + ioctl(fd, PERF_EVENT_IOC_ENABLE); or disabled with - ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); + ioctl(fd, PERF_EVENT_IOC_DISABLE); -For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. Enabling or disabling the leader of a group enables or disables the whole group; that is, while the group leader is disabled, none of the counters in the group will count. Enabling or disabling a member of a diff --git a/trunk/tools/perf/ui/browsers/annotate.c b/trunk/tools/perf/ui/browsers/annotate.c index 34b1c46eaf42..4deea6aaf927 100644 --- a/trunk/tools/perf/ui/browsers/annotate.c +++ b/trunk/tools/perf/ui/browsers/annotate.c @@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, "q/ESC/CTRL+C Exit\n\n" "-> Go to target\n" "<- Exit\n" - "H Cycle thru hottest instructions\n" + "h Cycle thru hottest instructions\n" "j Toggle showing jump to target arrows\n" "J Toggle showing number of jump sources on targets\n" "n Search next string\n" diff --git a/trunk/tools/perf/util/PERF-VERSION-GEN b/trunk/tools/perf/util/PERF-VERSION-GEN index 95264f304179..ad73300f7bac 100755 --- a/trunk/tools/perf/util/PERF-VERSION-GEN +++ b/trunk/tools/perf/util/PERF-VERSION-GEN @@ -12,7 +12,7 @@ LF=' # First check if there is a .git to get the version from git describe # otherwise try to get the version from the kernel makefile if test -d ../../.git -o -f ../../.git && - VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && + VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && case "$VN" in *$LF*) (exit 1) ;; v[0-9]*) diff --git a/trunk/tools/perf/util/callchain.c b/trunk/tools/perf/util/callchain.c index 3a6bff47614f..9f7106a8d9a4 100644 --- a/trunk/tools/perf/util/callchain.c +++ b/trunk/tools/perf/util/callchain.c @@ -18,8 +18,6 @@ #include "util.h" #include "callchain.h" -__thread struct callchain_cursor callchain_cursor; - bool ip_callchain__valid(struct ip_callchain *chain, const union perf_event *event) { diff --git a/trunk/tools/perf/util/callchain.h b/trunk/tools/perf/util/callchain.h index 3bdb407f9cd9..7f9c0f1ae3a9 100644 --- a/trunk/tools/perf/util/callchain.h +++ b/trunk/tools/perf/util/callchain.h @@ -76,8 +76,6 @@ struct callchain_cursor { struct callchain_cursor_node *curr; }; -extern __thread struct callchain_cursor callchain_cursor; - static inline void callchain_init(struct callchain_root *root) { INIT_LIST_HEAD(&root->node.siblings); diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c index 7400fb3fc50c..4ac5f5ae4ce9 100644 --- a/trunk/tools/perf/util/evlist.c +++ b/trunk/tools/perf/util/evlist.c @@ -159,17 +159,6 @@ int perf_evlist__add_attrs(struct perf_evlist *evlist, return -1; } -int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, - struct perf_event_attr *attrs, size_t nr_attrs) -{ - size_t i; - - for (i = 0; i < nr_attrs; i++) - 
event_attr_init(attrs + i); - - return perf_evlist__add_attrs(evlist, attrs, nr_attrs); -} - static int trace_event__id(const char *evname) { char *filename, *colon; @@ -274,8 +263,7 @@ void perf_evlist__disable(struct perf_evlist *evlist) for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { list_for_each_entry(pos, &evlist->entries, node) { for (thread = 0; thread < evlist->threads->nr; thread++) - ioctl(FD(pos, cpu, thread), - PERF_EVENT_IOC_DISABLE, 0); + ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); } } } @@ -288,8 +276,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { list_for_each_entry(pos, &evlist->entries, node) { for (thread = 0; thread < evlist->threads->nr; thread++) - ioctl(FD(pos, cpu, thread), - PERF_EVENT_IOC_ENABLE, 0); + ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); } } } diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h index 989bee9624c2..58abb63ac13a 100644 --- a/trunk/tools/perf/util/evlist.h +++ b/trunk/tools/perf/util/evlist.h @@ -54,8 +54,6 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); int perf_evlist__add_default(struct perf_evlist *evlist); int perf_evlist__add_attrs(struct perf_evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs); -int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, - struct perf_event_attr *attrs, size_t nr_attrs); int perf_evlist__add_tracepoints(struct perf_evlist *evlist, const char *tracepoints[], size_t nr_tracepoints); int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, @@ -64,8 +62,6 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, #define perf_evlist__add_attrs_array(evlist, array) \ perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) -#define perf_evlist__add_default_attrs(evlist, array) \ - __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) #define perf_evlist__add_tracepoints_array(evlist, array) \ perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) diff --git a/trunk/tools/perf/util/evsel.c b/trunk/tools/perf/util/evsel.c index 9f6cebd798ee..91d19138f3ec 100644 --- a/trunk/tools/perf/util/evsel.c +++ b/trunk/tools/perf/util/evsel.c @@ -494,24 +494,16 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel, } static int perf_event__parse_id_sample(const union perf_event *event, u64 type, - struct perf_sample *sample, - bool swapped) + struct perf_sample *sample) { const u64 *array = event->sample.array; - union u64_swap u; array += ((event->header.size - sizeof(event->header)) / sizeof(u64)) - 1; if (type & PERF_SAMPLE_CPU) { - u.val64 = *array; - if (swapped) { - /* undo swap of u64, then swap on individual u32s */ - u.val64 = bswap_64(u.val64); - u.val32[0] = bswap_32(u.val32[0]); - } - - sample->cpu = u.val32[0]; + u32 *p = (u32 *)array; + sample->cpu = *p; array--; } @@ -531,16 +523,9 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type, } if (type & PERF_SAMPLE_TID) { - u.val64 = *array; - if (swapped) { - /* undo swap of u64, then swap on individual u32s */ - u.val64 = bswap_64(u.val64); - u.val32[0] = bswap_32(u.val32[0]); - u.val32[1] = bswap_32(u.val32[1]); - } - - sample->pid = u.val32[0]; - sample->tid = u.val32[1]; + u32 *p = (u32 *)array; + sample->pid = p[0]; + sample->tid = p[1]; } return 0; @@ -577,7 +562,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, if (event->header.type != PERF_RECORD_SAMPLE) { if (!sample_id_all) return 0; - return 
perf_event__parse_id_sample(event, type, data, swapped); + return perf_event__parse_id_sample(event, type, data); } array = event->sample.array; diff --git a/trunk/tools/perf/util/hist.c b/trunk/tools/perf/util/hist.c index 514e2a4b367d..1293b5ebea4d 100644 --- a/trunk/tools/perf/util/hist.c +++ b/trunk/tools/perf/util/hist.c @@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he) * collapse the histogram */ -static bool hists__collapse_insert_entry(struct hists *hists __used, +static bool hists__collapse_insert_entry(struct hists *hists, struct rb_root *root, struct hist_entry *he) { @@ -397,9 +397,8 @@ static bool hists__collapse_insert_entry(struct hists *hists __used, iter->period += he->period; iter->nr_events += he->nr_events; if (symbol_conf.use_callchain) { - callchain_cursor_reset(&callchain_cursor); - callchain_merge(&callchain_cursor, - iter->callchain, + callchain_cursor_reset(&hists->callchain_cursor); + callchain_merge(&hists->callchain_cursor, iter->callchain, he->callchain); } hist_entry__free(he); diff --git a/trunk/tools/perf/util/hist.h b/trunk/tools/perf/util/hist.h index 34bb556d6219..cfc64e293f90 100644 --- a/trunk/tools/perf/util/hist.h +++ b/trunk/tools/perf/util/hist.h @@ -67,6 +67,8 @@ struct hists { struct events_stats stats; u64 event_stream; u16 col_len[HISTC_NR_COLS]; + /* Best would be to reuse the session callchain cursor */ + struct callchain_cursor callchain_cursor; }; struct hist_entry *__hists__add_entry(struct hists *self, diff --git a/trunk/tools/perf/util/pager.c b/trunk/tools/perf/util/pager.c index 3322b8446e89..1915de20dcac 100644 --- a/trunk/tools/perf/util/pager.c +++ b/trunk/tools/perf/util/pager.c @@ -57,10 +57,6 @@ void setup_pager(void) } if (!pager) pager = getenv("PAGER"); - if (!pager) { - if (!access("/usr/bin/pager", X_OK)) - pager = "/usr/bin/pager"; - } if (!pager) pager = "less"; else if (!*pager || !strcmp(pager, "cat")) diff --git a/trunk/tools/perf/util/probe-event.c b/trunk/tools/perf/util/probe-event.c index 0dda25d82d06..59dccc98b554 100644 --- a/trunk/tools/perf/util/probe-event.c +++ b/trunk/tools/perf/util/probe-event.c @@ -2164,12 +2164,16 @@ int del_perf_probe_events(struct strlist *dellist) error: if (kfd >= 0) { - strlist__delete(namelist); + if (namelist) + strlist__delete(namelist); + close(kfd); } if (ufd >= 0) { - strlist__delete(unamelist); + if (unamelist) + strlist__delete(unamelist); + close(ufd); } diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index 2600916efa83..93d355d27109 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -288,8 +288,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self, return bi; } -int machine__resolve_callchain(struct machine *self, - struct perf_evsel *evsel __used, +int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, struct thread *thread, struct ip_callchain *chain, struct symbol **parent) @@ -298,12 +297,7 @@ int machine__resolve_callchain(struct machine *self, unsigned int i; int err; - callchain_cursor_reset(&callchain_cursor); - - if (chain->nr > PERF_MAX_STACK_DEPTH) { - pr_warning("corrupted callchain. 
skipping...\n"); - return 0; - } + callchain_cursor_reset(&evsel->hists.callchain_cursor); for (i = 0; i < chain->nr; i++) { u64 ip; @@ -323,14 +317,7 @@ int machine__resolve_callchain(struct machine *self, case PERF_CONTEXT_USER: cpumode = PERF_RECORD_MISC_USER; break; default: - pr_debug("invalid callchain context: " - "%"PRId64"\n", (s64) ip); - /* - * It seems the callchain is corrupted. - * Discard all. - */ - callchain_cursor_reset(&callchain_cursor); - return 0; + break; } continue; } @@ -346,7 +333,7 @@ int machine__resolve_callchain(struct machine *self, break; } - err = callchain_cursor_append(&callchain_cursor, + err = callchain_cursor_append(&evsel->hists.callchain_cursor, ip, al.map, al.sym); if (err) return err; @@ -454,65 +441,37 @@ void mem_bswap_64(void *src, int byte_size) } } -static void swap_sample_id_all(union perf_event *event, void *data) -{ - void *end = (void *) event + event->header.size; - int size = end - data; - - BUG_ON(size % sizeof(u64)); - mem_bswap_64(data, size); -} - -static void perf_event__all64_swap(union perf_event *event, - bool sample_id_all __used) +static void perf_event__all64_swap(union perf_event *event) { struct perf_event_header *hdr = &event->header; mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); } -static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) +static void perf_event__comm_swap(union perf_event *event) { event->comm.pid = bswap_32(event->comm.pid); event->comm.tid = bswap_32(event->comm.tid); - - if (sample_id_all) { - void *data = &event->comm.comm; - - data += ALIGN(strlen(data) + 1, sizeof(u64)); - swap_sample_id_all(event, data); - } } -static void perf_event__mmap_swap(union perf_event *event, - bool sample_id_all) +static void perf_event__mmap_swap(union perf_event *event) { event->mmap.pid = bswap_32(event->mmap.pid); event->mmap.tid = bswap_32(event->mmap.tid); event->mmap.start = bswap_64(event->mmap.start); event->mmap.len = bswap_64(event->mmap.len); event->mmap.pgoff = bswap_64(event->mmap.pgoff); - - if (sample_id_all) { - void *data = &event->mmap.filename; - - data += ALIGN(strlen(data) + 1, sizeof(u64)); - swap_sample_id_all(event, data); - } } -static void perf_event__task_swap(union perf_event *event, bool sample_id_all) +static void perf_event__task_swap(union perf_event *event) { event->fork.pid = bswap_32(event->fork.pid); event->fork.tid = bswap_32(event->fork.tid); event->fork.ppid = bswap_32(event->fork.ppid); event->fork.ptid = bswap_32(event->fork.ptid); event->fork.time = bswap_64(event->fork.time); - - if (sample_id_all) - swap_sample_id_all(event, &event->fork + 1); } -static void perf_event__read_swap(union perf_event *event, bool sample_id_all) +static void perf_event__read_swap(union perf_event *event) { event->read.pid = bswap_32(event->read.pid); event->read.tid = bswap_32(event->read.tid); @@ -520,9 +479,6 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all) event->read.time_enabled = bswap_64(event->read.time_enabled); event->read.time_running = bswap_64(event->read.time_running); event->read.id = bswap_64(event->read.id); - - if (sample_id_all) - swap_sample_id_all(event, &event->read + 1); } static u8 revbyte(u8 b) @@ -574,8 +530,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr) swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); } -static void perf_event__hdr_attr_swap(union perf_event *event, - bool sample_id_all __used) +static void perf_event__hdr_attr_swap(union perf_event *event) { size_t size; @@ 
-586,21 +541,18 @@ static void perf_event__hdr_attr_swap(union perf_event *event, mem_bswap_64(event->attr.id, size); } -static void perf_event__event_type_swap(union perf_event *event, - bool sample_id_all __used) +static void perf_event__event_type_swap(union perf_event *event) { event->event_type.event_type.event_id = bswap_64(event->event_type.event_type.event_id); } -static void perf_event__tracing_data_swap(union perf_event *event, - bool sample_id_all __used) +static void perf_event__tracing_data_swap(union perf_event *event) { event->tracing_data.size = bswap_32(event->tracing_data.size); } -typedef void (*perf_event__swap_op)(union perf_event *event, - bool sample_id_all); +typedef void (*perf_event__swap_op)(union perf_event *event); static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_MMAP] = perf_event__mmap_swap, @@ -1034,15 +986,6 @@ static int perf_session__process_user_event(struct perf_session *session, union } } -static void event_swap(union perf_event *event, bool sample_id_all) -{ - perf_event__swap_op swap; - - swap = perf_event__swap_ops[event->header.type]; - if (swap) - swap(event, sample_id_all); -} - static int perf_session__process_event(struct perf_session *session, union perf_event *event, struct perf_tool *tool, @@ -1051,8 +994,9 @@ static int perf_session__process_event(struct perf_session *session, struct perf_sample sample; int ret; - if (session->header.needs_swap) - event_swap(event, session->sample_id_all); + if (session->header.needs_swap && + perf_event__swap_ops[event->header.type]) + perf_event__swap_ops[event->header.type](event); if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; @@ -1484,6 +1428,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, int print_sym, int print_dso, int print_symoffset) { struct addr_location al; + struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; struct callchain_cursor_node *node; if (perf_event__preprocess_sample(event, machine, &al, sample, @@ -1501,10 +1446,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, error("Failed to resolve callchain. Skipping\n"); return; } - callchain_cursor_commit(&callchain_cursor); + callchain_cursor_commit(cursor); while (1) { - node = callchain_cursor_current(&callchain_cursor); + node = callchain_cursor_current(cursor); if (!node) break; @@ -1515,12 +1460,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, } if (print_dso) { printf(" ("); - map__fprintf_dsoname(node->map, stdout); + map__fprintf_dsoname(al.map, stdout); printf(")"); } printf("\n"); - callchain_cursor_advance(&callchain_cursor); + callchain_cursor_advance(cursor); } } else { diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index 3e2e5ea0f03f..e2ba8858f3e1 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -323,7 +323,6 @@ struct dso *dso__new(const char *name) dso->sorted_by_name = 0; dso->has_build_id = 0; dso->kernel = DSO_TYPE_USER; - dso->needs_swap = DSO_SWAP__UNSET; INIT_LIST_HEAD(&dso->node); } @@ -1157,33 +1156,6 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } -static int dso__swap_init(struct dso *dso, unsigned char eidata) -{ - static unsigned int const endian = 1; - - dso->needs_swap = DSO_SWAP__NO; - - switch (eidata) { - case ELFDATA2LSB: - /* We are big endian, DSO is little endian. 
*/ - if (*(unsigned char const *)&endian != 1) - dso->needs_swap = DSO_SWAP__YES; - break; - - case ELFDATA2MSB: - /* We are little endian, DSO is big endian. */ - if (*(unsigned char const *)&endian != 0) - dso->needs_swap = DSO_SWAP__YES; - break; - - default: - pr_err("unrecognized DSO data encoding %d\n", eidata); - return -EINVAL; - } - - return 0; -} - static int dso__load_sym(struct dso *dso, struct map *map, const char *name, int fd, symbol_filter_t filter, int kmodule, int want_symtab) @@ -1215,9 +1187,6 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, goto out_elf_end; } - if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) - goto out_elf_end; - /* Always reject images with a mismatched build-id: */ if (dso->has_build_id) { u8 build_id[BUILD_ID_SIZE]; @@ -1303,7 +1272,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, if (opdsec && sym.st_shndx == opdidx) { u32 offset = sym.st_value - opdshdr.sh_addr; u64 *opd = opddata->d_buf + offset; - sym.st_value = DSO__SWAP(dso, u64, *opd); + sym.st_value = *opd; sym.st_shndx = elf_addr_to_index(elf, sym.st_value); } @@ -2817,11 +2786,8 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, struct map *dso__new_map(const char *name) { - struct map *map = NULL; struct dso *dso = dso__new(name); - - if (dso) - map = map__new2(0, dso, MAP__FUNCTION); + struct map *map = map__new2(0, dso, MAP__FUNCTION); return map; } diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index af0752b1aca1..5649d63798cb 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -9,7 +9,6 @@ #include #include #include -#include #ifdef HAVE_CPLUS_DEMANGLE extern char *cplus_demangle(const char *, int); @@ -161,18 +160,11 @@ enum dso_kernel_type { DSO_TYPE_GUEST_KERNEL }; -enum dso_swap_type { - DSO_SWAP__UNSET, - DSO_SWAP__NO, - DSO_SWAP__YES, -}; - struct dso { struct list_head node; struct rb_root symbols[MAP__NR_TYPES]; struct rb_root symbol_names[MAP__NR_TYPES]; enum dso_kernel_type kernel; - enum dso_swap_type needs_swap; u8 adjust_symbols:1; u8 has_build_id:1; u8 hit:1; @@ -190,28 +182,6 @@ struct dso { char name[0]; }; -#define DSO__SWAP(dso, type, val) \ -({ \ - type ____r = val; \ - BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ - if (dso->needs_swap == DSO_SWAP__YES) { \ - switch (sizeof(____r)) { \ - case 2: \ - ____r = bswap_16(val); \ - break; \ - case 4: \ - ____r = bswap_32(val); \ - break; \ - case 8: \ - ____r = bswap_64(val); \ - break; \ - default: \ - BUG_ON(1); \ - } \ - } \ - ____r; \ -}) - struct dso *dso__new(const char *name); void dso__delete(struct dso *dso); diff --git a/trunk/tools/power/x86/turbostat/turbostat.c b/trunk/tools/power/x86/turbostat/turbostat.c index 16de7ad4850f..ab2f682fd44c 100644 --- a/trunk/tools/power/x86/turbostat/turbostat.c +++ b/trunk/tools/power/x86/turbostat/turbostat.c @@ -73,8 +73,8 @@ int backwards_count; char *progname; int num_cpus; -cpu_set_t *cpu_present_set, *cpu_mask; -size_t cpu_present_setsize, cpu_mask_size; +cpu_set_t *cpu_mask; +size_t cpu_mask_size; struct counters { unsigned long long tsc; /* per thread */ @@ -103,12 +103,6 @@ struct timeval tv_even; struct timeval tv_odd; struct timeval tv_delta; -int mark_cpu_present(int pkg, int core, int cpu) -{ - CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); - return 0; -} - /* * cpu_mask_init(ncpus) * @@ -124,18 +118,6 @@ void cpu_mask_init(int ncpus) } cpu_mask_size = CPU_ALLOC_SIZE(ncpus); CPU_ZERO_S(cpu_mask_size, 
cpu_mask); - - /* - * Allocate and initialize cpu_present_set - */ - cpu_present_set = CPU_ALLOC(ncpus); - if (cpu_present_set == NULL) { - perror("CPU_ALLOC"); - exit(3); - } - cpu_present_setsize = CPU_ALLOC_SIZE(ncpus); - CPU_ZERO_S(cpu_present_setsize, cpu_present_set); - for_all_cpus(mark_cpu_present); } void cpu_mask_uninit() @@ -143,9 +125,6 @@ void cpu_mask_uninit() CPU_FREE(cpu_mask); cpu_mask = NULL; cpu_mask_size = 0; - CPU_FREE(cpu_present_set); - cpu_present_set = NULL; - cpu_present_setsize = 0; } int cpu_migrate(int cpu) @@ -933,8 +912,6 @@ int is_snb(unsigned int family, unsigned int model) switch (model) { case 0x2A: case 0x2D: - case 0x3A: /* IVB */ - case 0x3D: /* IVB Xeon */ return 1; } return 0; @@ -1070,9 +1047,6 @@ int fork_it(char **argv) int retval; pid_t child_pid; get_counters(cnt_even); - - /* clear affinity side-effect of get_counters() */ - sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork();
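[Editor's note, not part of the patch] The design.txt hunk earlier in this diff describes how a counter, or a whole counter group, is enabled and disabled from user space with PERF_EVENT_IOC_ENABLE / PERF_EVENT_IOC_DISABLE, and notes that toggling the group leader toggles the entire group. As an illustration only, a minimal sketch of that usage follows; it matches the two-argument ioctl() form shown in the documentation text above (ioctl() is variadic, so omitting the third argument is valid C). The helper name open_cycles_counter() is hypothetical; the perf_event_open syscall, the attr fields, and the ioctl request constants are the standard Linux perf ABI.

/*
 * Editorial sketch: open a hardware cycles counter disabled, enable it
 * around the measured region as described in design.txt, then read it.
 * perf_event_open() has no glibc wrapper, so syscall() is used directly.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static int open_cycles_counter(void)		/* hypothetical helper */
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;			/* start disabled */

	/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	uint64_t count;
	int fd = open_cycles_counter();

	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE);	/* start counting */
	/* ... code to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE);	/* stop counting */

	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

If this counter were opened as a group leader (other events opened with group_fd set to its fd), the same two ioctl calls would, per the design.txt text, enable and disable every member of the group at once.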