diff --git a/[refs] b/[refs] index f253f8564d66..5578bd7b01be 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 5dbc746960b5cd62c558cd6663697afd41f59d39 +refs/heads/master: b37e161388ac3980d5dfb73050e85874b84253eb diff --git a/trunk/Makefile b/trunk/Makefile index 0142c934adbd..c6863b55f7c7 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc6 NAME = Unicycling Gorilla # *DOCUMENTATION* diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 2651b1da1c56..49d993cee512 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -1189,16 +1189,6 @@ config PL310_ERRATA_588369 is not correctly implemented in PL310 as clean lines are not invalidated as a result of these operations. -config ARM_ERRATA_643719 - bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" - depends on CPU_V7 && SMP - help - This option enables the workaround for the 643719 Cortex-A9 (prior to - r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR - register returns zero when it should return one. The workaround - corrects this value, ensuring cache maintenance operations which use - it behave as intended and avoiding data corruption. - config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" depends on CPU_V7 @@ -2016,7 +2006,7 @@ config XIP_PHYS_ADDR config KEXEC bool "Kexec system call (EXPERIMENTAL)" - depends on (!SMP || PM_SLEEP_SMP) + depends on (!SMP || HOTPLUG_CPU) help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/trunk/arch/arm/boot/compressed/Makefile b/trunk/arch/arm/boot/compressed/Makefile index 120b83bfde20..79e9bdbfc491 100644 --- a/trunk/arch/arm/boot/compressed/Makefile +++ b/trunk/arch/arm/boot/compressed/Makefile @@ -116,8 +116,7 @@ targets := vmlinux vmlinux.lds \ # Make sure files are removed during clean extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ - lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ - hyp-stub.S + lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) diff --git a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index ded558bb0f3b..d1650fb34c0a 100644 --- a/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ b/trunk/arch/arm/boot/dts/exynos5250-pinctrl.dtsi @@ -763,7 +763,7 @@ }; }; - pinctrl@03860000 { + pinctrl@03680000 { gpz: gpz { gpio-controller; #gpio-cells = <2>; diff --git a/trunk/arch/arm/boot/dts/exynos5250.dtsi b/trunk/arch/arm/boot/dts/exynos5250.dtsi index fc9fb3d526e2..0673524238a6 100644 --- a/trunk/arch/arm/boot/dts/exynos5250.dtsi +++ b/trunk/arch/arm/boot/dts/exynos5250.dtsi @@ -161,9 +161,9 @@ interrupts = <0 50 0>; }; - pinctrl_3: pinctrl@03860000 { + pinctrl_3: pinctrl@03680000 { compatible = "samsung,exynos5250-pinctrl"; - reg = <0x03860000 0x1000>; + reg = <0x0368000 0x1000>; interrupts = <0 47 0>; }; diff --git a/trunk/arch/arm/include/asm/cacheflush.h b/trunk/arch/arm/include/asm/cacheflush.h index 17d0ae8672fa..bff71388e72a 100644 --- a/trunk/arch/arm/include/asm/cacheflush.h +++ b/trunk/arch/arm/include/asm/cacheflush.h @@ -320,7 +320,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma, } #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -extern void flush_kernel_dcache_page(struct page *); +static inline void 
flush_kernel_dcache_page(struct page *page) +{ +} #define flush_dcache_mmap_lock(mapping) \ spin_lock_irq(&(mapping)->tree_lock) diff --git a/trunk/arch/arm/kernel/machine_kexec.c b/trunk/arch/arm/kernel/machine_kexec.c index 4fb074c446bf..8ef8c9337809 100644 --- a/trunk/arch/arm/kernel/machine_kexec.c +++ b/trunk/arch/arm/kernel/machine_kexec.c @@ -134,10 +134,6 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; - if (num_online_cpus() > 1) { - pr_err("kexec: error: multiple CPUs still online\n"); - return; - } page_list = image->head & PAGE_MASK; diff --git a/trunk/arch/arm/kernel/process.c b/trunk/arch/arm/kernel/process.c index 6e8931ccf13e..282de4826abb 100644 --- a/trunk/arch/arm/kernel/process.c +++ b/trunk/arch/arm/kernel/process.c @@ -184,61 +184,30 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); -/* - * Called by kexec, immediately prior to machine_kexec(). - * - * This must completely disable all secondary CPUs; simply causing those CPUs - * to execute e.g. a RAM-based pin loop is not sufficient. This allows the - * kexec'd kernel to use any and all RAM as it sees fit, without having to - * avoid any code or data used by any SW CPU pin loop. The CPU hotplug - * functionality embodied in disable_nonboot_cpus() to achieve this. - */ void machine_shutdown(void) { - disable_nonboot_cpus(); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif } -/* - * Halting simply requires that the secondary CPUs stop performing any - * activity (executing tasks, handling interrupts). smp_send_stop() - * achieves this. - */ void machine_halt(void) { - smp_send_stop(); - + machine_shutdown(); local_irq_disable(); while (1); } -/* - * Power-off simply requires that the secondary CPUs stop performing any - * activity (executing tasks, handling interrupts). smp_send_stop() - * achieves this. When the system power is turned off, it will take all CPUs - * with it. - */ void machine_power_off(void) { - smp_send_stop(); - + machine_shutdown(); if (pm_power_off) pm_power_off(); } -/* - * Restart requires that the secondary CPUs stop performing any activity - * while the primary CPU resets the system. Systems with a single CPU can - * use soft_restart() as their machine descriptor's .restart hook, since that - * will cause the only available CPU to reset. Systems with multiple CPUs must - * provide a HW restart implementation, to ensure that all CPUs reset at once. - * This is required so that any code running after reset on the primary CPU - * doesn't have to co-ordinate with other CPUs to ensure they aren't still - * executing pre-reset code, and using RAM that the primary CPU's code wishes - * to use. Implementing such co-ordination would be essentially impossible. 
- */ void machine_restart(char *cmd) { - smp_send_stop(); + machine_shutdown(); arm_pm_restart(reboot_mode, cmd); diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index 5919eb451bb9..550d63cef68e 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -651,6 +651,17 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_HOTPLUG_CPU +static void smp_kill_cpus(cpumask_t *mask) +{ + unsigned int cpu; + for_each_cpu(cpu, mask) + platform_cpu_kill(cpu); +} +#else +static void smp_kill_cpus(cpumask_t *mask) { } +#endif + void smp_send_stop(void) { unsigned long timeout; @@ -668,6 +679,8 @@ void smp_send_stop(void) if (num_online_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); + + smp_kill_cpus(&mask); } /* diff --git a/trunk/arch/arm/mm/cache-v7.S b/trunk/arch/arm/mm/cache-v7.S index 515b00064da8..15451ee4acc8 100644 --- a/trunk/arch/arm/mm/cache-v7.S +++ b/trunk/arch/arm/mm/cache-v7.S @@ -92,14 +92,6 @@ ENTRY(v7_flush_dcache_louis) mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr -#ifdef CONFIG_ARM_ERRATA_643719 - ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register - ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do - ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? - biceq r2, r2, #0x0000000f @ clear minor revision number - teqeq r2, r1 @ test for errata affected core and if so... - orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') -#endif ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 moveq pc, lr @ return if level == 0 diff --git a/trunk/arch/arm/mm/flush.c b/trunk/arch/arm/mm/flush.c index 32aa5861119f..0d473cce501c 100644 --- a/trunk/arch/arm/mm/flush.c +++ b/trunk/arch/arm/mm/flush.c @@ -300,39 +300,6 @@ void flush_dcache_page(struct page *page) } EXPORT_SYMBOL(flush_dcache_page); -/* - * Ensure cache coherency for the kernel mapping of this page. We can - * assume that the page is pinned via kmap. - * - * If the page only exists in the page cache and there are no user - * space mappings, this is a no-op since the page was already marked - * dirty at creation. Otherwise, we need to flush the dirty kernel - * cache lines directly. - */ -void flush_kernel_dcache_page(struct page *page) -{ - if (cache_is_vivt() || cache_is_vipt_aliasing()) { - struct address_space *mapping; - - mapping = page_mapping(page); - - if (!mapping || mapping_mapped(mapping)) { - void *addr; - - addr = page_address(page); - /* - * kmap_atomic() doesn't set the page virtual - * address for highmem pages, and - * kunmap_atomic() takes care of cache - * flushing already. - */ - if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) - __cpuc_flush_dcache_area(addr, PAGE_SIZE); - } - } -} -EXPORT_SYMBOL(flush_kernel_dcache_page); - /* * Flush an anonymous page so that users of get_user_pages() * can safely access the data. 
The expected sequence is: diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index 4d409e6a552d..e0d8565671a6 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -616,12 +616,10 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init __map_init_section(pmd_t *pmd, unsigned long addr, +static void __init map_init_section(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type) { - pmd_t *p = pmd; - #ifndef CONFIG_ARM_LPAE /* * In classic MMU format, puds and pmds are folded in to @@ -640,7 +638,7 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr, phys += SECTION_SIZE; } while (pmd++, addr += SECTION_SIZE, addr != end); - flush_pmd_entry(p); + flush_pmd_entry(pmd); } static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, @@ -663,7 +661,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, */ if (type->prot_sect && ((addr | next | phys) & ~SECTION_MASK) == 0) { - __map_init_section(pmd, addr, next, phys, type); + map_init_section(pmd, addr, next, phys, type); } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), type); diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S index 4c8c9c10a388..2c73a7301ff7 100644 --- a/trunk/arch/arm/mm/proc-v7.S +++ b/trunk/arch/arm/mm/proc-v7.S @@ -409,8 +409,8 @@ __v7_ca9mp_proc_info: */ .type __v7_pj4b_proc_info, #object __v7_pj4b_proc_info: - .long 0x560f5800 - .long 0xff0fff00 + .long 0x562f5840 + .long 0xfffffff0 __v7_proc __v7_pj4b_setup .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info diff --git a/trunk/arch/arm64/kernel/perf_event.c b/trunk/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..1e49e5eb81e9 100644 --- a/trunk/arch/arm64/kernel/perf_event.c +++ b/trunk/arch/arm64/kernel/perf_event.c @@ -1336,7 +1336,6 @@ void perf_callchain_user(struct perf_callchain_entry *entry, return; } - perf_callchain_store(entry, regs->pc); tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < PERF_MAX_STACK_DEPTH && diff --git a/trunk/arch/metag/include/asm/hugetlb.h b/trunk/arch/metag/include/asm/hugetlb.h index 471f481e67f3..f545477e61f3 100644 --- a/trunk/arch/metag/include/asm/hugetlb.h +++ b/trunk/arch/metag/include/asm/hugetlb.h @@ -2,7 +2,6 @@ #define _ASM_METAG_HUGETLB_H #include -#include static inline int is_hugepage_only_range(struct mm_struct *mm, diff --git a/trunk/arch/mn10300/include/asm/irqflags.h b/trunk/arch/mn10300/include/asm/irqflags.h index 8730c0a3c37d..678f68d5f37b 100644 --- a/trunk/arch/mn10300/include/asm/irqflags.h +++ b/trunk/arch/mn10300/include/asm/irqflags.h @@ -13,8 +13,9 @@ #define _ASM_IRQFLAGS_H #include -/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ -#include +#ifndef __ASSEMBLY__ +#include +#endif /* * interrupt control diff --git a/trunk/arch/mn10300/include/asm/smp.h b/trunk/arch/mn10300/include/asm/smp.h index 56c42417d428..6745dbe64944 100644 --- a/trunk/arch/mn10300/include/asm/smp.h +++ b/trunk/arch/mn10300/include/asm/smp.h @@ -24,7 +24,6 @@ #ifndef __ASSEMBLY__ #include #include -#include #endif #ifdef CONFIG_SMP @@ -86,7 +85,7 @@ extern cpumask_t cpu_boot_map; extern void smp_init_cpus(void); extern void smp_cache_interrupt(void); extern void send_IPI_allbutself(int irq); -extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); +extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); extern void 
arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -101,7 +100,6 @@ extern void __cpu_die(unsigned int cpu); #ifndef __ASSEMBLY__ static inline void smp_init_cpus(void) {} -#define raw_smp_processor_id() 0 #endif /* __ASSEMBLY__ */ #endif /* CONFIG_SMP */ diff --git a/trunk/arch/parisc/include/asm/mmzone.h b/trunk/arch/parisc/include/asm/mmzone.h index b6b34a0987e7..cc50d33b7b88 100644 --- a/trunk/arch/parisc/include/asm/mmzone.h +++ b/trunk/arch/parisc/include/asm/mmzone.h @@ -27,7 +27,7 @@ extern struct node_map_data node_data[]; #define PFNNID_SHIFT (30 - PAGE_SHIFT) #define PFNNID_MAP_MAX 512 /* support 512GB */ -extern signed char pfnnid_map[PFNNID_MAP_MAX]; +extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; #ifndef CONFIG_64BIT #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) @@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn) i = pfn >> PFNNID_SHIFT; BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); - return pfnnid_map[i]; + return (int)pfnnid_map[i]; } static inline int pfn_valid(int pfn) diff --git a/trunk/arch/parisc/include/asm/pci.h b/trunk/arch/parisc/include/asm/pci.h index 465154076d23..3234f492d575 100644 --- a/trunk/arch/parisc/include/asm/pci.h +++ b/trunk/arch/parisc/include/asm/pci.h @@ -225,9 +225,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 15 : 14; } -#define HAVE_PCI_MMAP - -extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); - #endif /* __ASM_PARISC_PCI_H */ diff --git a/trunk/arch/parisc/kernel/hardware.c b/trunk/arch/parisc/kernel/hardware.c index 872275659d98..9e2d2e408529 100644 --- a/trunk/arch/parisc/kernel/hardware.c +++ b/trunk/arch/parisc/kernel/hardware.c @@ -1205,7 +1205,6 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, diff --git a/trunk/arch/parisc/kernel/pacache.S b/trunk/arch/parisc/kernel/pacache.S index b743a80eaba0..36d7f402e48e 100644 --- a/trunk/arch/parisc/kernel/pacache.S +++ b/trunk/arch/parisc/kernel/pacache.S @@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) #endif ldil L%dcache_stride, %r1 - ldw R%dcache_stride(%r1), r31 + ldw R%dcache_stride(%r1), %r1 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, r31, %r25 - - -1: fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) - fdc,m r31(%r28) + sub %r25, %r1, %r25 + + +1: fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) + fdc,m %r1(%r28) cmpb,COND(<<) %r28, %r25,1b - fdc,m r31(%r28) + fdc,m %r1(%r28) sync @@ -936,7 
+936,7 @@ ENTRY(flush_icache_page_asm) #endif ldil L%icache_stride, %r1 - ldw R%icache_stride(%r1), %r31 + ldw R%icache_stride(%r1), %r1 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r31, %r25 + sub %r25, %r1, %r25 /* fic only has the type 26 form on PA1.1, requiring an * explicit space specification, so use %sr4 */ -1: fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) - fic,m %r31(%sr4,%r28) +1: fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r31(%sr4,%r28) + fic,m %r1(%sr4,%r28) sync diff --git a/trunk/arch/parisc/kernel/pci.c b/trunk/arch/parisc/kernel/pci.c index 64f2764a8cef..60309051875e 100644 --- a/trunk/arch/parisc/kernel/pci.c +++ b/trunk/arch/parisc/kernel/pci.c @@ -220,33 +220,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } -int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine) -{ - unsigned long prot; - - /* - * I/O space can be accessed via normal processor loads and stores on - * this platform but for now we elect not to do this and portable - * drivers should not do this anyway. - */ - if (mmap_state == pci_mmap_io) - return -EINVAL; - - if (write_combine) - return -EINVAL; - - /* - * Ignore write-combine; for now only return uncached mappings. - */ - prot = pgprot_val(vma->vm_page_prot); - prot |= _PAGE_NO_CACHE; - vma->vm_page_prot = __pgprot(prot); - - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, vma->vm_page_prot); -} - /* * A driver is enabling the device. We make sure that all the appropriate * bits are set to allow the device to operate as the driver is expecting. diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index 505b56c6b9b9..1c965642068b 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt #ifdef CONFIG_DISCONTIGMEM struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; +unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; #endif static struct resource data_resource = { diff --git a/trunk/arch/powerpc/kvm/booke.c b/trunk/arch/powerpc/kvm/booke.c index 1a1b51189773..5cd7ad0c1176 100644 --- a/trunk/arch/powerpc/kvm/booke.c +++ b/trunk/arch/powerpc/kvm/booke.c @@ -673,6 +673,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ret = s; goto out; } + kvmppc_lazy_ee_enable(); kvm_guest_enter(); @@ -698,8 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_load_guest_fp(vcpu); #endif - kvmppc_lazy_ee_enable(); - ret = __kvmppc_vcpu_run(kvm_run, vcpu); /* No need for kvm_guest_exit. It's done in handle_exit. 
diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c index 77fdd2cef33b..237c8e5f2640 100644 --- a/trunk/arch/powerpc/mm/hugetlbpage.c +++ b/trunk/arch/powerpc/mm/hugetlbpage.c @@ -592,14 +592,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, do { pmd = pmd_offset(pud, addr); next = pmd_addr_end(addr, end); - if (!is_hugepd(pmd)) { - /* - * if it is not hugepd pointer, we should already find - * it cleared. - */ - WARN_ON(!pmd_none_or_clear_bad(pmd)); + if (pmd_none_or_clear_bad(pmd)) continue; - } #ifdef CONFIG_PPC_FSL_BOOK3E /* * Increment next by the size of the huge mapping since diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index 028ac1f71b51..46ac1ddea683 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn, return indirect_read_config(bus, devfn, offset, len, val); } -static struct pci_ops fsl_indirect_pci_ops = +#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) + +static struct pci_ops fsl_indirect_pcie_ops = { .read = fsl_indirect_read_config, .write = indirect_write_config, }; -static void __init fsl_setup_indirect_pci(struct pci_controller* hose, - resource_size_t cfg_addr, - resource_size_t cfg_data, u32 flags) -{ - setup_indirect_pci(hose, cfg_addr, cfg_data, flags); - hose->ops = &fsl_indirect_pci_ops; -} - -#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) - #define MAX_PHYS_ADDR_BITS 40 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; @@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary) if (!hose->private_data) goto no_bridge; - fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, - PPC_INDIRECT_TYPE_BIG_ENDIAN); + setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, + PPC_INDIRECT_TYPE_BIG_ENDIAN); if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK; if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { + /* use fsl_indirect_read_config for PCIe */ + hose->ops = &fsl_indirect_pcie_ops; /* For PCIE read HEADER_TYPE to identify controler mode */ early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type); if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) @@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev) if (ret) goto err0; } else { - fsl_setup_indirect_pci(hose, rsrc_cfg.start, - rsrc_cfg.start + 4, 0); + setup_indirect_pci(hose, rsrc_cfg.start, + rsrc_cfg.start + 4, 0); } printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. 
" diff --git a/trunk/arch/sparc/include/asm/Kbuild b/trunk/arch/sparc/include/asm/Kbuild index 7e4a97fbded4..ff18e3cfb6b1 100644 --- a/trunk/arch/sparc/include/asm/Kbuild +++ b/trunk/arch/sparc/include/asm/Kbuild @@ -6,7 +6,6 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h -generic-y += linkage.h generic-y += local64.h generic-y += mutex.h generic-y += irq_regs.h diff --git a/trunk/arch/sparc/include/asm/leon.h b/trunk/arch/sparc/include/asm/leon.h index b836e9297f2a..15a716934e4d 100644 --- a/trunk/arch/sparc/include/asm/leon.h +++ b/trunk/arch/sparc/include/asm/leon.h @@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void) #ifdef CONFIG_SMP # define LEON3_IRQ_IPI_DEFAULT 13 -# define LEON3_IRQ_TICKER (leon3_gptimer_irq) +# define LEON3_IRQ_TICKER (leon3_ticker_irq) # define LEON3_IRQ_CROSS_CALL 15 #endif diff --git a/trunk/arch/sparc/include/asm/leon_amba.h b/trunk/arch/sparc/include/asm/leon_amba.h index 24ec48c3ff90..f3034eddf468 100644 --- a/trunk/arch/sparc/include/asm/leon_amba.h +++ b/trunk/arch/sparc/include/asm/leon_amba.h @@ -47,7 +47,6 @@ struct amba_prom_registers { #define LEON3_GPTIMER_LD 4 #define LEON3_GPTIMER_IRQEN 8 #define LEON3_GPTIMER_SEPIRQ 8 -#define LEON3_GPTIMER_TIMERS 0x7 #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ /* 0 = hold scalar and counter */ diff --git a/trunk/arch/sparc/include/asm/linkage.h b/trunk/arch/sparc/include/asm/linkage.h new file mode 100644 index 000000000000..291c2d01c44f --- /dev/null +++ b/trunk/arch/sparc/include/asm/linkage.h @@ -0,0 +1,6 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +/* Nothing to see here... */ + +#endif diff --git a/trunk/arch/sparc/kernel/ds.c b/trunk/arch/sparc/kernel/ds.c index 5ef48dab5636..75bb608c423e 100644 --- a/trunk/arch/sparc/kernel/ds.c +++ b/trunk/arch/sparc/kernel/ds.c @@ -843,8 +843,7 @@ void ldom_reboot(const char *boot_command) unsigned long len; strcpy(full_boot_str, "boot "); - strlcpy(full_boot_str + strlen("boot "), boot_command, - sizeof(full_boot_str + strlen("boot "))); + strcpy(full_boot_str + strlen("boot "), boot_command); len = strlen(full_boot_str); if (reboot_data_supported) { diff --git a/trunk/arch/sparc/kernel/leon_kernel.c b/trunk/arch/sparc/kernel/leon_kernel.c index b7c68976cbc7..7c0231dabe44 100644 --- a/trunk/arch/sparc/kernel/leon_kernel.c +++ b/trunk/arch/sparc/kernel/leon_kernel.c @@ -38,6 +38,7 @@ static DEFINE_SPINLOCK(leon_irq_lock); unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ +int leon3_ticker_irq; /* Timer ticker IRQ */ unsigned int sparc_leon_eirq; #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) #define LEON_IACK (&leon3_irqctrl_regs->iclear) @@ -277,9 +278,6 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) leon_clear_profile_irq(cpu); - if (cpu == boot_cpu_id) - timer_interrupt(irq, NULL); - ce = &per_cpu(sparc32_clockevent, cpu); irq_enter(); @@ -301,7 +299,6 @@ void __init leon_init_timers(void) int icsel; int ampopts; int err; - u32 config; sparc_config.get_cycles_offset = leon_cycles_offset; sparc_config.cs_period = 1000000 / HZ; @@ -380,6 +377,23 @@ void __init leon_init_timers(void) LEON3_BYPASS_STORE_PA( &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); +#ifdef CONFIG_SMP + leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; + + if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & + (1<e[leon3_gptimer_idx+1].val, + 0); + 
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, + (((1000000/HZ) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, + 0); +#endif + /* * The IRQ controller may (if implemented) consist of multiple * IRQ controllers, each mapped on a 4Kb boundary. @@ -402,6 +416,13 @@ void __init leon_init_timers(void) if (eirq != 0) leon_eirq_setup(eirq); + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); + if (err) { + printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); + prom_halt(); + } + #ifdef CONFIG_SMP { unsigned long flags; @@ -418,31 +439,30 @@ void __init leon_init_timers(void) } #endif - config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); - if (config & (1 << LEON3_GPTIMER_SEPIRQ)) - leon3_gptimer_irq += leon3_gptimer_idx; - else if ((config & LEON3_GPTIMER_TIMERS) > 1) - pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | + LEON3_GPTIMER_IRQEN); #ifdef CONFIG_SMP /* Install per-cpu IRQ handler for broadcasted ticker */ - irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, + irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, "per-cpu", 0); err = request_irq(irq, leon_percpu_timer_ce_interrupt, - IRQF_PERCPU | IRQF_TIMER, "timer", NULL); -#else - irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); - err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); -#endif + IRQF_PERCPU | IRQF_TIMER, "ticker", + NULL); if (err) { - pr_err("Unable to attach timer IRQ%d\n", irq); + printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); prom_halt(); } - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); +#endif return; bad: printk(KERN_ERR "No Timer/irqctrl found\n"); diff --git a/trunk/arch/sparc/kernel/leon_pci_grpci1.c b/trunk/arch/sparc/kernel/leon_pci_grpci1.c index 6df26e37f879..7739a54315e2 100644 --- a/trunk/arch/sparc/kernel/leon_pci_grpci1.c +++ b/trunk/arch/sparc/kernel/leon_pci_grpci1.c @@ -536,9 +536,11 @@ static int grpci1_of_probe(struct platform_device *ofdev) /* find device register base address */ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&ofdev->dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + regs = devm_request_and_ioremap(&ofdev->dev, res); + if (!regs) { + dev_err(&ofdev->dev, "io-regs mapping failed\n"); + return -EADDRNOTAVAIL; + } /* * check that we're in Host Slot and that we can act as a Host Bridge diff --git a/trunk/arch/sparc/kernel/leon_pmc.c b/trunk/arch/sparc/kernel/leon_pmc.c index b0b3967a2dd2..bdf53d9a8d46 100644 --- a/trunk/arch/sparc/kernel/leon_pmc.c +++ b/trunk/arch/sparc/kernel/leon_pmc.c @@ -47,10 +47,6 @@ void pmc_leon_idle_fixup(void) * MMU does not get a TLB miss here by using the MMU BYPASS ASI. 
*/ register unsigned int address = (unsigned int)leon3_irqctrl_regs; - - /* Interrupts need to be enabled to not hang the CPU */ - local_irq_enable(); - __asm__ __volatile__ ( "wr %%g0, %%asr19\n" "lda [%0] %1, %%g0\n" @@ -64,9 +60,6 @@ void pmc_leon_idle_fixup(void) */ void pmc_leon_idle(void) { - /* Interrupts need to be enabled to not hang the CPU */ - local_irq_enable(); - /* For systems without power-down, this will be no-op */ __asm__ __volatile__ ("wr %g0, %asr19\n\t"); } diff --git a/trunk/arch/sparc/kernel/setup_32.c b/trunk/arch/sparc/kernel/setup_32.c index 1434526970a6..38bf80a22f02 100644 --- a/trunk/arch/sparc/kernel/setup_32.c +++ b/trunk/arch/sparc/kernel/setup_32.c @@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + strcpy(boot_command_line, *cmdline_p); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/trunk/arch/sparc/kernel/setup_64.c b/trunk/arch/sparc/kernel/setup_64.c index 13785547e435..88a127b9c69e 100644 --- a/trunk/arch/sparc/kernel/setup_64.c +++ b/trunk/arch/sparc/kernel/setup_64.c @@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + strcpy(boot_command_line, *cmdline_p); parse_early_param(); boot_flags_init(*cmdline_p); diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index 04fd55a6e461..a7171997adfd 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -1098,14 +1098,7 @@ static int __init grab_mblocks(struct mdesc_handle *md) m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); - - /* The address-congruence-offset property is optional. - * Explicity zero it be identifty this. - */ - if (val) - m->offset = *val; - else - m->offset = 0UL; + m->offset = *val; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); diff --git a/trunk/arch/sparc/mm/tlb.c b/trunk/arch/sparc/mm/tlb.c index 37e7bc4c95b3..83d89bcb44af 100644 --- a/trunk/arch/sparc/mm/tlb.c +++ b/trunk/arch/sparc/mm/tlb.c @@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - flush_tsb_user_page(mm, vaddr); global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); goto out; } diff --git a/trunk/arch/sparc/prom/bootstr_32.c b/trunk/arch/sparc/prom/bootstr_32.c index d2b49d2365e7..f5ec32e0d419 100644 --- a/trunk/arch/sparc/prom/bootstr_32.c +++ b/trunk/arch/sparc/prom/bootstr_32.c @@ -23,25 +23,23 @@ prom_getbootargs(void) return barg_buf; } - switch (prom_vers) { + switch(prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ - for (iter = 1; iter < 8; iter++) { + for(iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; - while (*arg != 0) { + while(*arg != 0) { /* Leave place for space and null. */ - if (cp >= barg_buf + BARG_LEN - 2) + if(cp >= barg_buf + BARG_LEN-2){ /* We might issue a warning here. */ break; + } *cp++ = *arg++; } *cp++ = ' '; - if (cp >= barg_buf + BARG_LEN - 1) - /* We might issue a warning here. 
*/ - break; } *cp = 0; break; diff --git a/trunk/arch/sparc/prom/tree_64.c b/trunk/arch/sparc/prom/tree_64.c index bd1b2a3ac34e..92204c3800b5 100644 --- a/trunk/arch/sparc/prom/tree_64.c +++ b/trunk/arch/sparc/prom/tree_64.c @@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node) return prom_node_to_node("child", node); } -phandle prom_getchild(phandle node) +inline phandle prom_getchild(phandle node) { phandle cnode; @@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node) return prom_node_to_node(prom_peer_name, node); } -phandle prom_getsibling(phandle node) +inline phandle prom_getsibling(phandle node) { phandle sibnode; @@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. */ -int prom_getproplen(phandle node, const char *prop) +inline int prom_getproplen(phandle node, const char *prop) { unsigned long args[6]; @@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen); * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ -int prom_getproperty(phandle node, const char *prop, - char *buffer, int bufsize) +inline int prom_getproperty(phandle node, const char *prop, + char *buffer, int bufsize) { unsigned long args[8]; int plen; @@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ -int prom_getint(phandle node, const char *prop) +inline int prom_getint(phandle node, const char *prop) { int intprop; @@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop"; /* Return the first property type for node 'node'. * buffer should be at least 32B in length */ -char *prom_firstprop(phandle node, char *buffer) +inline char *prom_firstprop(phandle node, char *buffer) { unsigned long args[7]; @@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop); * at node 'node' . Returns NULL string if no more * property types for this node. 
*/ -char *prom_nextprop(phandle node, const char *oprop, char *buffer) +inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) { unsigned long args[7]; char buf[32]; diff --git a/trunk/arch/tile/lib/exports.c b/trunk/arch/tile/lib/exports.c index a93b02a25222..4385cb6fa00a 100644 --- a/trunk/arch/tile/lib/exports.c +++ b/trunk/arch/tile/lib/exports.c @@ -84,6 +84,4 @@ uint64_t __ashrdi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashrdi3); uint64_t __ashldi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashldi3); -int __ffsdi2(uint64_t); -EXPORT_SYMBOL(__ffsdi2); #endif diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c index 3df3bd544492..d7d21851e60c 100644 --- a/trunk/arch/um/drivers/mconsole_kern.c +++ b/trunk/arch/um/drivers/mconsole_kern.c @@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req) } do { - loff_t pos = file->f_pos; + loff_t pos; mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index fe120da25625..685692c94f05 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -2265,7 +2265,6 @@ source "fs/Kconfig.binfmt" config IA32_EMULATION bool "IA32 Emulation" depends on X86_64 - select BINFMT_ELF select COMPAT_BINFMT_ELF select HAVE_UID16 ---help--- diff --git a/trunk/arch/x86/crypto/aesni-intel_asm.S b/trunk/arch/x86/crypto/aesni-intel_asm.S index 477e9d75149b..62fe22cd4cba 100644 --- a/trunk/arch/x86/crypto/aesni-intel_asm.S +++ b/trunk/arch/x86/crypto/aesni-intel_asm.S @@ -2681,68 +2681,56 @@ ENTRY(aesni_xts_crypt8) addq %rcx, KEYP movdqa IV, STATE1 - movdqu 0x00(INP), INC - pxor INC, STATE1 + pxor 0x00(INP), STATE1 movdqu IV, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x10(INP), INC - pxor INC, STATE2 + pxor 0x10(INP), STATE2 movdqu IV, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x20(INP), INC - pxor INC, STATE3 + pxor 0x20(INP), STATE3 movdqu IV, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x30(INP), INC - pxor INC, STATE4 + pxor 0x30(INP), STATE4 movdqu IV, 0x30(OUTP) call *%r11 - movdqu 0x00(OUTP), INC - pxor INC, STATE1 + pxor 0x00(OUTP), STATE1 movdqu STATE1, 0x00(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE1 - movdqu 0x40(INP), INC - pxor INC, STATE1 + pxor 0x40(INP), STATE1 movdqu IV, 0x40(OUTP) - movdqu 0x10(OUTP), INC - pxor INC, STATE2 + pxor 0x10(OUTP), STATE2 movdqu STATE2, 0x10(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE2 - movdqu 0x50(INP), INC - pxor INC, STATE2 + pxor 0x50(INP), STATE2 movdqu IV, 0x50(OUTP) - movdqu 0x20(OUTP), INC - pxor INC, STATE3 + pxor 0x20(OUTP), STATE3 movdqu STATE3, 0x20(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE3 - movdqu 0x60(INP), INC - pxor INC, STATE3 + pxor 0x60(INP), STATE3 movdqu IV, 0x60(OUTP) - movdqu 0x30(OUTP), INC - pxor INC, STATE4 + pxor 0x30(OUTP), STATE4 movdqu STATE4, 0x30(OUTP) _aesni_gf128mul_x_ble() movdqa IV, STATE4 - movdqu 0x70(INP), INC - pxor INC, STATE4 + pxor 0x70(INP), STATE4 movdqu IV, 0x70(OUTP) _aesni_gf128mul_x_ble() @@ -2750,20 +2738,16 @@ ENTRY(aesni_xts_crypt8) call *%r11 - movdqu 0x40(OUTP), INC - pxor INC, STATE1 + pxor 0x40(OUTP), STATE1 movdqu STATE1, 0x40(OUTP) - movdqu 0x50(OUTP), INC - pxor INC, STATE2 + pxor 0x50(OUTP), STATE2 movdqu STATE2, 0x50(OUTP) - movdqu 0x60(OUTP), INC - pxor INC, STATE3 + pxor 0x60(OUTP), STATE3 movdqu STATE3, 0x60(OUTP) - movdqu 0x70(OUTP), INC - pxor INC, STATE4 + pxor 0x70(OUTP), STATE4 movdqu STATE4, 0x70(OUTP) ret 
diff --git a/trunk/arch/x86/ia32/ia32_aout.c b/trunk/arch/x86/ia32/ia32_aout.c index 52ff81cce008..805078e08013 100644 --- a/trunk/arch/x86/ia32/ia32_aout.c +++ b/trunk/arch/x86/ia32/ia32_aout.c @@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, /* struct user */ DUMP_WRITE(&dump, sizeof(dump)); /* Now dump all of the user data. Include malloced stuff as well */ - DUMP_SEEK(PAGE_SIZE - sizeof(dump)); + DUMP_SEEK(PAGE_SIZE); /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ diff --git a/trunk/arch/x86/include/asm/irq.h b/trunk/arch/x86/include/asm/irq.h index 57873beb3292..ba870bb6dd8e 100644 --- a/trunk/arch/x86/include/asm/irq.h +++ b/trunk/arch/x86/include/asm/irq.h @@ -41,9 +41,4 @@ extern int vector_used_by_percpu_irq(unsigned int vector); extern void init_ISA_irqs(void); -#ifdef CONFIG_X86_LOCAL_APIC -void arch_trigger_all_cpu_backtrace(void); -#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace -#endif - #endif /* _ASM_X86_IRQ_H */ diff --git a/trunk/arch/x86/include/asm/microcode.h b/trunk/arch/x86/include/asm/microcode.h index 6bc3985ee473..6825e2efd1b4 100644 --- a/trunk/arch/x86/include/asm/microcode.h +++ b/trunk/arch/x86/include/asm/microcode.h @@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} #ifdef CONFIG_MICROCODE_EARLY #define MAX_UCODE_COUNT 128 extern void __init load_ucode_bsp(void); -extern void __cpuinit load_ucode_ap(void); +extern __init void load_ucode_ap(void); extern int __init save_microcode_in_initrd(void); #else static inline void __init load_ucode_bsp(void) {} -static inline void __cpuinit load_ucode_ap(void) {} +static inline __init void load_ucode_ap(void) {} static inline int __init save_microcode_in_initrd(void) { return 0; diff --git a/trunk/arch/x86/include/asm/nmi.h b/trunk/arch/x86/include/asm/nmi.h index 86f9301903c8..c0fa356e90de 100644 --- a/trunk/arch/x86/include/asm/nmi.h +++ b/trunk/arch/x86/include/asm/nmi.h @@ -18,7 +18,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , void __user *, size_t *, loff_t *); extern int unknown_nmi_panic; -#endif /* CONFIG_X86_LOCAL_APIC */ +void arch_trigger_all_cpu_backtrace(void); +#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace +#endif #define NMI_FLAG_FIRST 1 diff --git a/trunk/arch/x86/kernel/apic/hw_nmi.c b/trunk/arch/x86/kernel/apic/hw_nmi.c index a698d7165c96..31cb9ae992b7 100644 --- a/trunk/arch/x86/kernel/apic/hw_nmi.c +++ b/trunk/arch/x86/kernel/apic/hw_nmi.c @@ -9,7 +9,6 @@ * */ #include -#include #include #include diff --git a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c index 5f90b85ff22e..35ffda5d0727 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits) if (mtrr_tom2) x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; + nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); /* * [0, 1M) should always be covered by var mtrr with WB * and fixed mtrrs should take effect before var mtrr for it: */ - nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, + nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, 1ULL<<(20 - PAGE_SHIFT)); - /* add from var mtrr at last */ - nr_range = x86_get_mtrr_mem_range(range, nr_range, - x_remove_base, x_remove_size); + /* Sort the ranges: */ + sort_range(range, nr_range); range_sums = sum_ranges(range, nr_range); 
printk(KERN_INFO "total RAM covered: %ldM\n", diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index a9e22073bd56..f60d41ff9a97 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), - INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), EVENT_EXTRA_END }; diff --git a/trunk/arch/x86/kernel/kvmclock.c b/trunk/arch/x86/kernel/kvmclock.c index 3dd37ebd591b..d2c381280e3c 100644 --- a/trunk/arch/x86/kernel/kvmclock.c +++ b/trunk/arch/x86/kernel/kvmclock.c @@ -242,7 +242,6 @@ void __init kvmclock_init(void) if (!mem) return; hv_clock = __va(mem); - memset(hv_clock, 0, size); if (kvm_register_clock("boot clock")) { hv_clock = NULL; diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index 81a5f5e8f142..4e7a37ff03ab 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -277,6 +277,18 @@ void exit_idle(void) } #endif +void arch_cpu_idle_prepare(void) +{ + /* + * If we're the non-boot CPU, nothing set the stack canary up + * for us. CPU0 already has it initialized but no harm in + * doing it again. This is a good place for updating it, as + * we wont ever return from this function (so the invalid + * canaries already on the stack wont ever trigger). 
+ */ + boot_init_stack_canary(); +} + void arch_cpu_idle_enter(void) { local_touch_nmi(); diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index bfd348e99369..9c73b51817e4 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) void __cpuinit set_cpu_sibling_map(int cpu) { + bool has_mc = boot_cpu_data.x86_max_cores > 1; bool has_smt = smp_num_siblings > 1; - bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; struct cpuinfo_x86 *c = &cpu_data(cpu); struct cpuinfo_x86 *o; int i; cpumask_set_cpu(cpu, cpu_sibling_setup_mask); - if (!has_mp) { + if (!has_smt && !has_mc) { cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); cpumask_set_cpu(cpu, cpu_core_mask(cpu)); @@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) if ((i == cpu) || (has_smt && match_smt(c, o))) link_mask(sibling, cpu, i); - if ((i == cpu) || (has_mp && match_llc(c, o))) + if ((i == cpu) || (has_mc && match_llc(c, o))) link_mask(llc_shared, cpu, i); } @@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) for_each_cpu(i, cpu_sibling_setup_mask) { o = &cpu_data(i); - if ((i == cpu) || (has_mp && match_mc(c, o))) { + if ((i == cpu) || (has_mc && match_mc(c, o))) { link_mask(core, cpu, i); /* diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index e8ba99c34180..094b5d96ab14 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -582,6 +582,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; + if (kvm_x86_ops->get_cpl(vcpu) != 0) + return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) @@ -595,8 +597,7 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (kvm_x86_ops->get_cpl(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { + if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index d2fbcedcf6ea..5ae2eb09419e 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -1069,10 +1069,7 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) * that by attempting to use more space than is available. */ unsigned long dummy_size = remaining_size + 1024; - void *dummy = kzalloc(dummy_size, GFP_ATOMIC); - - if (!dummy) - return EFI_OUT_OF_RESOURCES; + void *dummy = kmalloc(dummy_size, GFP_ATOMIC); status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, EFI_VARIABLE_NON_VOLATILE | @@ -1092,8 +1089,6 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) 0, dummy); } - kfree(dummy); - /* * The runtime code may now have triggered a garbage collection * run, so check the variable info again diff --git a/trunk/drivers/acpi/acpi_lpss.c b/trunk/drivers/acpi/acpi_lpss.c index cab13f2fc28e..652fd5ce303c 100644 --- a/trunk/drivers/acpi/acpi_lpss.c +++ b/trunk/drivers/acpi/acpi_lpss.c @@ -164,24 +164,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev, if (dev_desc->clk_required) { ret = register_device_clock(adev, pdata); if (ret) { - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; + /* + * Skip the device, but don't terminate the namespace + * scan. 
+ */ + kfree(pdata); + return 0; } } - /* - * This works around a known issue in ACPI tables where LPSS devices - * have _PS0 and _PS3 without _PSC (and no power resources), so - * acpi_bus_init_power() will assume that the BIOS has put them into D0. - */ - ret = acpi_device_fix_up_power(adev); - if (ret) { - /* Skip the device, but continue the namespace scan. */ - ret = 0; - goto err_out; - } - adev->driver_data = pdata; ret = acpi_create_platform_device(adev, id); if (ret > 0) diff --git a/trunk/drivers/acpi/device_pm.c b/trunk/drivers/acpi/device_pm.c index 31c217a42839..318fa32a141e 100644 --- a/trunk/drivers/acpi/device_pm.c +++ b/trunk/drivers/acpi/device_pm.c @@ -290,26 +290,6 @@ int acpi_bus_init_power(struct acpi_device *device) return 0; } -/** - * acpi_device_fix_up_power - Force device with missing _PSC into D0. - * @device: Device object whose power state is to be fixed up. - * - * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, - * are assumed to be put into D0 by the BIOS. However, in some cases that may - * not be the case and this function should be used then. - */ -int acpi_device_fix_up_power(struct acpi_device *device) -{ - int ret = 0; - - if (!device->power.flags.power_resources - && !device->power.flags.explicit_get - && device->power.state == ACPI_STATE_D0) - ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); - - return ret; -} - int acpi_bus_update_power(acpi_handle handle, int *state_p) { struct acpi_device *device; diff --git a/trunk/drivers/acpi/dock.c b/trunk/drivers/acpi/dock.c index ec117c6c996c..4fdea381ef21 100644 --- a/trunk/drivers/acpi/dock.c +++ b/trunk/drivers/acpi/dock.c @@ -868,10 +868,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, if (!count) return -EINVAL; - acpi_scan_lock_acquire(); begin_undock(dock_station); ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); - acpi_scan_lock_release(); return ret ? ret: count; } static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index 288bb270f8ed..f962047c6c85 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -885,7 +885,6 @@ int acpi_add_power_resource(acpi_handle handle) ACPI_STA_DEFAULT); mutex_init(&resource->resource_lock); INIT_LIST_HEAD(&resource->dependent); - INIT_LIST_HEAD(&resource->list_node); resource->name = device->pnp.bus_id; strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); diff --git a/trunk/drivers/acpi/resource.c b/trunk/drivers/acpi/resource.c index 3322b47ab7ca..a3868f6c222a 100644 --- a/trunk/drivers/acpi/resource.c +++ b/trunk/drivers/acpi/resource.c @@ -304,8 +304,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) } static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, - u8 triggering, u8 polarity, u8 shareable, - bool legacy) + u8 triggering, u8 polarity, u8 shareable) { int irq, p, t; @@ -318,19 +317,14 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, * In IO-APIC mode, use overrided attribute. Two reasons: * 1. BIOS bug in DSDT * 2. BIOS uses IO-APIC mode Interrupt Source Override - * - * We do this only if we are dealing with IRQ() or IRQNoFlags() - * resource (the legacy ISA resources). With modern ACPI 5 devices - * using extended IRQ descriptors we take the IRQ configuration - * from _CRS directly. 
*/ - if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { + if (!acpi_get_override_irq(gsi, &t, &p)) { u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; if (triggering != trig || polarity != pol) { pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, - t ? "level" : "edge", p ? "low" : "high"); + t ? "edge" : "level", p ? "low" : "high"); triggering = trig; polarity = pol; } @@ -379,7 +373,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->sharable, true); + irq->sharable); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; @@ -389,7 +383,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, ext_irq->interrupts[index], ext_irq->triggering, ext_irq->polarity, - ext_irq->sharable, false); + ext_irq->sharable); break; default: return false; diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c index 01e21037d8fe..4b1f9265887f 100644 --- a/trunk/drivers/base/firmware_class.c +++ b/trunk/drivers/base/firmware_class.c @@ -450,18 +450,8 @@ static void fw_load_abort(struct firmware_priv *fw_priv) { struct firmware_buf *buf = fw_priv->buf; - /* - * There is a small window in which user can write to 'loading' - * between loading done and disappearance of 'loading' - */ - if (test_bit(FW_STATUS_DONE, &buf->status)) - return; - set_bit(FW_STATUS_ABORT, &buf->status); complete_all(&buf->completion); - - /* avoid user action after loading abort */ - fw_priv->buf = NULL; } #define is_fw_load_aborted(buf) \ @@ -538,12 +528,7 @@ static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - int loading = 0; - - mutex_lock(&fw_lock); - if (fw_priv->buf) - loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); - mutex_unlock(&fw_lock); + int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); return sprintf(buf, "%d\n", loading); } @@ -585,12 +570,12 @@ static ssize_t firmware_loading_store(struct device *dev, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - struct firmware_buf *fw_buf; + struct firmware_buf *fw_buf = fw_priv->buf; int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); - fw_buf = fw_priv->buf; + if (!fw_buf) goto out; @@ -792,6 +777,10 @@ static void firmware_class_timeout_work(struct work_struct *work) struct firmware_priv, timeout_work.work); mutex_lock(&fw_lock); + if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { + mutex_unlock(&fw_lock); + return; + } fw_load_abort(fw_priv); mutex_unlock(&fw_lock); } @@ -872,6 +861,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, cancel_delayed_work_sync(&fw_priv->timeout_work); + fw_priv->buf = NULL; + device_remove_file(f_dev, &dev_attr_loading); err_del_bin_attr: device_remove_bin_file(f_dev, &firmware_attr_data); diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 49394e3f31bc..3063452e55da 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -1036,16 +1036,12 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) char *name; u64 segment; int ret; - char *name_format; name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); if (!name) return NULL; segment = offset >> 
rbd_dev->header.obj_order; - name_format = "%s.%012llx"; - if (rbd_dev->image_format == 2) - name_format = "%s.%016llx"; - ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format, + ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", rbd_dev->header.object_prefix, segment); if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { pr_err("error formatting segment name for #%llu (%d)\n", diff --git a/trunk/drivers/gpu/drm/drm_prime.c b/trunk/drivers/gpu/drm/drm_prime.c index 5b7b9110254b..dcde35231e25 100644 --- a/trunk/drivers/gpu/drm/drm_prime.c +++ b/trunk/drivers/gpu/drm/drm_prime.c @@ -190,7 +190,8 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, if (ret) return ERR_PTR(ret); } - return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); + return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, + 0600); } EXPORT_SYMBOL(drm_gem_prime_export); diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 6948eb88c2b7..0e5341695922 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -2687,9 +2687,6 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev) int r600_uvd_init(struct radeon_device *rdev) { int i, j, r; - /* disable byte swapping */ - u32 lmi_swap_cntl = 0; - u32 mp_swap_cntl = 0; /* raise clocks while booting up the VCPU */ radeon_set_uvd_clocks(rdev, 53300, 40000); @@ -2714,13 +2711,9 @@ int r600_uvd_init(struct radeon_device *rdev) WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | (1 << 21) | (1 << 9) | (1 << 20)); -#ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; -#endif - WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); + /* disable byte swapping */ + WREG32(UVD_LMI_SWAP_CNTL, 0); + WREG32(UVD_MP_SWAP_CNTL, 0); WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); WREG32(UVD_MPC_SET_MUXA1, 0x0); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index b0dc0b6cb4e0..189973836cff 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -244,6 +244,16 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) */ void radeon_wb_disable(struct radeon_device *rdev) { + int r; + + if (rdev->wb.wb_obj) { + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return; + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + } rdev->wb.enabled = false; } @@ -259,11 +269,6 @@ void radeon_wb_fini(struct radeon_device *rdev) { radeon_wb_disable(rdev); if (rdev->wb.wb_obj) { - if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - } radeon_bo_unref(&rdev->wb.wb_obj); rdev->wb.wb = NULL; rdev->wb.wb_obj = NULL; @@ -290,26 +295,26 @@ int radeon_wb_init(struct radeon_device *rdev) dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); return r; } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->wb.wb_obj); - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + } + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + 
radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); + if (r) { radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); - radeon_wb_fini(rdev); - return r; - } + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; } /* clear wb memory */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index ddb8f8e04eb5..5b937dfe6f65 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -63,9 +63,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) { struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - if (drv->cpu_addr) { - *drv->cpu_addr = cpu_to_le32(seq); - } + *drv->cpu_addr = cpu_to_le32(seq); } else { WREG32(drv->scratch_reg, seq); } @@ -86,11 +84,7 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) u32 seq = 0; if (likely(rdev->wb.enabled || !drv->scratch_reg)) { - if (drv->cpu_addr) { - seq = le32_to_cpu(*drv->cpu_addr); - } else { - seq = lower_32_bits(atomic64_read(&drv->last_seq)); - } + seq = le32_to_cpu(*drv->cpu_addr); } else { seq = RREG32(drv->scratch_reg); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index 43ec4a401f07..2c1341f63dc5 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -1197,13 +1197,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo_va *bo_va) { - int r = 0; + int r; mutex_lock(&rdev->vm_manager.lock); mutex_lock(&bo_va->vm->mutex); - if (bo_va->soffset) { - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); - } + r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); mutex_unlock(&bo_va->vm->mutex); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index 82434018cbe8..e17faa7cf732 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -402,13 +402,6 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ - radeon_ring_free_size(rdev, ring); - if (ring->ring_free_dw == (ring->ring_size / 4)) { - /* This is an empty ring update lockup info to avoid - * false positive. 
- */ - radeon_ring_lockup_update(ring); - } ndw = (ndw + ring->align_mask) & ~ring->align_mask; while (ndw > (ring->ring_free_dw - 1)) { radeon_ring_free_size(rdev, ring); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c index cad735dd02c6..906e5c0ca3b9 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_uvd.c @@ -159,17 +159,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev) if (!r) { radeon_bo_kunmap(rdev->uvd.vcpu_bo); radeon_bo_unpin(rdev->uvd.vcpu_bo); - rdev->uvd.cpu_addr = NULL; - if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { - radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); - } radeon_bo_unreserve(rdev->uvd.vcpu_bo); - - if (rdev->uvd.cpu_addr) { - radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); - } else { - rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; - } } return r; } @@ -188,10 +178,6 @@ int radeon_uvd_resume(struct radeon_device *rdev) return r; } - /* Have been pin in cpu unmap unpin */ - radeon_bo_kunmap(rdev->uvd.vcpu_bo); - radeon_bo_unpin(rdev->uvd.vcpu_bo); - r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, &rdev->uvd.gpu_addr); if (r) { @@ -627,19 +613,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD create msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000000); - msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); - msg[4] = cpu_to_le32(0x00000000); - msg[5] = cpu_to_le32(0x00000000); - msg[6] = cpu_to_le32(0x00000000); - msg[7] = cpu_to_le32(0x00000780); - msg[8] = cpu_to_le32(0x00000440); - msg[9] = cpu_to_le32(0x00000000); - msg[10] = cpu_to_le32(0x01b37000); + msg[0] = 0x00000de4; + msg[1] = 0x00000000; + msg[2] = handle; + msg[3] = 0x00000000; + msg[4] = 0x00000000; + msg[5] = 0x00000000; + msg[6] = 0x00000000; + msg[7] = 0x00000780; + msg[8] = 0x00000440; + msg[9] = 0x00000000; + msg[10] = 0x01b37000; for (i = 11; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + msg[i] = 0x0; radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); @@ -673,12 +659,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, } /* stitch together an UVD destroy msg */ - msg[0] = cpu_to_le32(0x00000de4); - msg[1] = cpu_to_le32(0x00000002); - msg[2] = cpu_to_le32(handle); - msg[3] = cpu_to_le32(0x00000000); + msg[0] = 0x00000de4; + msg[1] = 0x00000002; + msg[2] = handle; + msg[3] = 0x00000000; for (i = 4; i < 1024; ++i) - msg[i] = cpu_to_le32(0x0); + msg[i] = 0x0; radeon_bo_kunmap(bo); radeon_bo_unreserve(bo); diff --git a/trunk/drivers/input/joystick/xpad.c b/trunk/drivers/input/joystick/xpad.c index fa061d46527f..d6cbfe9df218 100644 --- a/trunk/drivers/input/joystick/xpad.c +++ b/trunk/drivers/input/joystick/xpad.c @@ -137,7 +137,7 @@ static const struct xpad_device { { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, - { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 }, { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, diff 
--git a/trunk/drivers/input/keyboard/Kconfig b/trunk/drivers/input/keyboard/Kconfig index 7ac9c9818d55..62a2c0e4cc99 100644 --- a/trunk/drivers/input/keyboard/Kconfig +++ b/trunk/drivers/input/keyboard/Kconfig @@ -431,7 +431,6 @@ config KEYBOARD_TEGRA config KEYBOARD_OPENCORES tristate "OpenCores Keyboard Controller" - depends on HAS_IOMEM help Say Y here if you want to use the OpenCores Keyboard Controller http://www.opencores.org/project,keyboardcontroller diff --git a/trunk/drivers/input/serio/Kconfig b/trunk/drivers/input/serio/Kconfig index 1bda828f4b55..aebfe3ecb945 100644 --- a/trunk/drivers/input/serio/Kconfig +++ b/trunk/drivers/input/serio/Kconfig @@ -205,7 +205,6 @@ config SERIO_XILINX_XPS_PS2 config SERIO_ALTERA_PS2 tristate "Altera UP PS/2 controller" - depends on HAS_IOMEM help Say Y here if you have Altera University Program PS/2 ports. diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 384fbcd0cee0..518282da6d85 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -363,7 +363,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ case 0x160802: /* Cintiq 13HD Pro Pen */ case 0x180802: /* DTH2242 Pen */ - case 0x100802: /* Intuos4/5 13HD/24HD General Pen */ wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -402,7 +401,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ case 0x18080a: /* DTH2242 Eraser */ - case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */ wacom->tool[idx] = BTN_TOOL_RUBBER; break; diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.c b/trunk/drivers/input/touchscreen/cyttsp_core.c index ae89d2609ab0..8e60437ac85b 100644 --- a/trunk/drivers/input/touchscreen/cyttsp_core.c +++ b/trunk/drivers/input/touchscreen/cyttsp_core.c @@ -116,15 +116,6 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd) return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); } -static int cyttsp_handshake(struct cyttsp *ts) -{ - if (ts->pdata->use_hndshk) - return ttsp_send_command(ts, - ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); - - return 0; -} - static int cyttsp_load_bl_regs(struct cyttsp *ts) { memset(&ts->bl_data, 0, sizeof(ts->bl_data)); @@ -142,7 +133,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts) memcpy(bl_cmd, bl_command, sizeof(bl_command)); if (ts->pdata->bl_keys) memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], - ts->pdata->bl_keys, CY_NUM_BL_KEYS); + ts->pdata->bl_keys, sizeof(bl_command)); error = ttsp_write_block_data(ts, CY_REG_BASE, sizeof(bl_cmd), bl_cmd); @@ -176,10 +167,6 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts) if (error) return error; - error = cyttsp_handshake(ts); - if (error) - return error; - return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? 
-EIO : 0; } @@ -201,10 +188,6 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts) if (error) return error; - error = cyttsp_handshake(ts); - if (error) - return error; - if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) return -EIO; @@ -361,9 +344,12 @@ static irqreturn_t cyttsp_irq(int irq, void *handle) goto out; /* provide flow control handshake */ - error = cyttsp_handshake(ts); - if (error) - goto out; + if (ts->pdata->use_hndshk) { + error = ttsp_send_command(ts, + ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); + if (error) + goto out; + } if (unlikely(ts->state == CY_IDLE_STATE)) goto out; diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.h b/trunk/drivers/input/touchscreen/cyttsp_core.h index f1ebde369f86..1aa3c6967e70 100644 --- a/trunk/drivers/input/touchscreen/cyttsp_core.h +++ b/trunk/drivers/input/touchscreen/cyttsp_core.h @@ -67,8 +67,8 @@ struct cyttsp_xydata { /* TTSP System Information interface definition */ struct cyttsp_sysinfo_data { u8 hst_mode; - u8 mfg_stat; u8 mfg_cmd; + u8 mfg_stat; u8 cid[3]; u8 tt_undef1; u8 uid[8]; diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c index 19ceaa60e0f4..1760ceb68b7b 100644 --- a/trunk/drivers/irqchip/irq-gic.c +++ b/trunk/drivers/irqchip/irq-gic.c @@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, static int __cpuinit gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + if (action == CPU_STARTING) gic_cpu_init(&gic_data[0]); return NOTIFY_OK; } diff --git a/trunk/drivers/media/Kconfig b/trunk/drivers/media/Kconfig index 8270388e2a0d..7f5a7cac6dc7 100644 --- a/trunk/drivers/media/Kconfig +++ b/trunk/drivers/media/Kconfig @@ -136,9 +136,9 @@ config DVB_NET # This Kconfig option is used by both PCI and USB drivers config TTPCI_EEPROM - tristate - depends on I2C - default n + tristate + depends on I2C + default n source "drivers/media/dvb-core/Kconfig" @@ -189,12 +189,6 @@ config MEDIA_SUBDRV_AUTOSELECT If unsure say Y. -config MEDIA_ATTACH - bool - depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT - depends on MODULES - default MODULES - source "drivers/media/i2c/Kconfig" source "drivers/media/tuners/Kconfig" source "drivers/media/dvb-frontends/Kconfig" diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.c b/trunk/drivers/media/platform/exynos4-is/fimc-is.c index 0741945b79ed..520e4398b69c 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-is.c +++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.c @@ -834,11 +834,23 @@ static int fimc_is_probe(struct platform_device *pdev) goto err_clk; } pm_runtime_enable(dev); - + /* + * Enable only the ISP power domain, keep FIMC-IS clocks off until + * the whole clock tree is configured. The ISP power domain needs + * be active in order to acces any CMU_ISP clock registers. 
+ */ ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_irq; + ret = fimc_is_setup_clocks(is); + pm_runtime_put_sync(dev); + + if (ret < 0) + goto err_irq; + + is->clk_init = true; + is->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(is->alloc_ctx)) { ret = PTR_ERR(is->alloc_ctx); @@ -860,8 +872,6 @@ static int fimc_is_probe(struct platform_device *pdev) if (ret < 0) goto err_dfs; - pm_runtime_put_sync(dev); - dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; @@ -881,11 +891,9 @@ static int fimc_is_probe(struct platform_device *pdev) static int fimc_is_runtime_resume(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - int ret; - ret = fimc_is_setup_clocks(is); - if (ret) - return ret; + if (!is->clk_init) + return 0; return fimc_is_enable_clocks(is); } @@ -894,7 +902,9 @@ static int fimc_is_runtime_suspend(struct device *dev) { struct fimc_is *is = dev_get_drvdata(dev); - fimc_is_disable_clocks(is); + if (is->clk_init) + fimc_is_disable_clocks(is); + return 0; } diff --git a/trunk/drivers/media/platform/exynos4-is/fimc-is.h b/trunk/drivers/media/platform/exynos4-is/fimc-is.h index d7db133b493f..606a7c9fe526 100644 --- a/trunk/drivers/media/platform/exynos4-is/fimc-is.h +++ b/trunk/drivers/media/platform/exynos4-is/fimc-is.h @@ -264,6 +264,7 @@ struct fimc_is { spinlock_t slock; struct clk *clocks[ISS_CLKS_MAX]; + bool clk_init; void __iomem *regs; void __iomem *pmu_regs; int irq; diff --git a/trunk/drivers/media/platform/s5p-jpeg/Makefile b/trunk/drivers/media/platform/s5p-jpeg/Makefile index d18cb5edd2d5..ddc2900d88a2 100644 --- a/trunk/drivers/media/platform/s5p-jpeg/Makefile +++ b/trunk/drivers/media/platform/s5p-jpeg/Makefile @@ -1,2 +1,2 @@ s5p-jpeg-objs := jpeg-core.o -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o diff --git a/trunk/drivers/media/platform/s5p-mfc/Makefile b/trunk/drivers/media/platform/s5p-mfc/Makefile index 15f59b324fef..379008c6d09a 100644 --- a/trunk/drivers/media/platform/s5p-mfc/Makefile +++ b/trunk/drivers/media/platform/s5p-mfc/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o +obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o diff --git a/trunk/drivers/media/tuners/Kconfig b/trunk/drivers/media/tuners/Kconfig index 15665debc572..f6768cad001a 100644 --- a/trunk/drivers/media/tuners/Kconfig +++ b/trunk/drivers/media/tuners/Kconfig @@ -1,3 +1,23 @@ +config MEDIA_ATTACH + bool "Load and attach frontend and tuner driver modules as needed" + depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT + depends on MODULES + default y if !EXPERT + help + Remove the static dependency of DVB card drivers on all + frontend modules for all possible card variants. Instead, + allow the card drivers to only load the frontend modules + they require. + + Also, tuner module will automatically load a tuner driver + when needed, for analog mode. + + This saves several KBytes of memory. + + Note: You will need module-init-tools v3.2 or later for this feature. + + If unsure say Y. 
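
Editorial aside on the MEDIA_ATTACH help text above, which describes replacing hard frontend/tuner link-time dependencies with on-demand module loading. Purely as a hedged sketch of that pattern (this is not code from the patch, and demo_attach_frontend() is an invented name; only IS_ENABLED() and request_module() are real kernel interfaces):

/*
 * Illustrative only -- not part of this diff. A bool option such as
 * MEDIA_ATTACH is typically consumed at the call site roughly like this,
 * so the frontend module is loaded when first needed instead of being a
 * hard dependency of every card driver variant.
 */
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/kmod.h>
#include <linux/printk.h>

static int demo_attach_frontend(const char *modname)
{
	if (!IS_ENABLED(CONFIG_MEDIA_ATTACH))
		return 0;	/* frontend was built in or pulled in statically */

	/* request_module() returns 0 once modprobe has loaded the module */
	if (request_module("%s", modname) != 0) {
		pr_warn("demo: could not load %s, continuing without it\n",
			modname);
		return -ENODEV;
	}
	return 0;
}

The in-tree attach helpers are more involved than this (they also manage module references); the sketch only shows the general shape the help text is describing.
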
+ # Analog TV tuners, auto-loaded via tuner.ko config MEDIA_TUNER tristate diff --git a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 2cc8ec70e3b6..22015fe1a0f3 100644 --- a/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/trunk/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; - struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; + struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; dev_dbg(&d->udev->dev, "%s:\n", __func__); @@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) goto found; } - /* check R820T ID register; reg=00 val=69 */ + /* check R820T by reading tuner stats at I2C addr 0x1a */ ret = rtl28xxu_ctrl_msg(d, &req_r820t); - if (ret == 0 && buf[0] == 0x69) { + if (ret == 0) { priv->tuner = TUNER_RTL2832_R820T; priv->tuner_name = "R820T"; goto found; diff --git a/trunk/drivers/parisc/iosapic.c b/trunk/drivers/parisc/iosapic.c index e79e006eb9ab..9544cdc0d1af 100644 --- a/trunk/drivers/parisc/iosapic.c +++ b/trunk/drivers/parisc/iosapic.c @@ -811,70 +811,6 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) return pcidev->irq; } -static struct iosapic_info *first_isi = NULL; - -#ifdef CONFIG_64BIT -int iosapic_serial_irq(int num) -{ - struct iosapic_info *isi = first_isi; - struct irt_entry *irte = NULL; /* only used if PAT PDC */ - struct vector_info *vi; - int isi_line; /* line used by device */ - - /* lookup IRT entry for isi/slot/pin set */ - irte = &irt_cell[num]; - - DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", - irte, - irte->entry_type, - irte->entry_length, - irte->polarity_trigger, - irte->src_bus_irq_devno, - irte->src_bus_id, - irte->src_seg_id, - irte->dest_iosapic_intin, - (u32) irte->dest_iosapic_addr); - isi_line = irte->dest_iosapic_intin; - - /* get vector info for this input line */ - vi = isi->isi_vector + isi_line; - DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); - - /* If this IRQ line has already been setup, skip it */ - if (vi->irte) - goto out; - - vi->irte = irte; - - /* - * Allocate processor IRQ - * - * XXX/FIXME The txn_alloc_irq() code and related code should be - * moved to enable_irq(). That way we only allocate processor IRQ - * bits for devices that actually have drivers claiming them. - * Right now we assign an IRQ to every PCI device present, - * regardless of whether it's used or not. 
- */ - vi->txn_irq = txn_alloc_irq(8); - - if (vi->txn_irq < 0) - panic("I/O sapic: couldn't get TXN IRQ\n"); - - /* enable_irq() will use txn_* to program IRdT */ - vi->txn_addr = txn_alloc_addr(vi->txn_irq); - vi->txn_data = txn_alloc_data(vi->txn_irq); - - vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; - vi->eoi_data = cpu_to_le32(vi->txn_data); - - cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); - - out: - - return vi->txn_irq; -} -#endif - /* ** squirrel away the I/O Sapic Version @@ -941,8 +877,6 @@ void *iosapic_register(unsigned long hpa) vip->irqline = (unsigned char) cnt; vip->iosapic = isi; } - if (!first_isi) - first_isi = isi; return isi; } diff --git a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 66b0b26a1381..7a3870f385f6 100644 --- a/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/trunk/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -688,12 +688,8 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ - if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { - se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; - se_cmd->residual_count = 0; - } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count += se_cmd->data_length; + se_cmd->residual_count = se_cmd->data_length; cmd->bufflen = 0; } diff --git a/trunk/drivers/spi/spi-pxa2xx-dma.c b/trunk/drivers/spi/spi-pxa2xx-dma.c index 6427600b5bbe..c735c5a008a2 100644 --- a/trunk/drivers/spi/spi-pxa2xx-dma.c +++ b/trunk/drivers/spi/spi-pxa2xx-dma.c @@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, int ret; sg_free_table(sgt); - ret = sg_alloc_table(sgt, nents, GFP_ATOMIC); + ret = sg_alloc_table(sgt, nents, GFP_KERNEL); if (ret) return ret; } diff --git a/trunk/drivers/spi/spi-pxa2xx.c b/trunk/drivers/spi/spi-pxa2xx.c index 48b396fced0a..f5d84d6f8222 100644 --- a/trunk/drivers/spi/spi-pxa2xx.c +++ b/trunk/drivers/spi/spi-pxa2xx.c @@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) return NULL; - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate memory for platform data\n"); diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c index 71cc3e6ef47c..5000586cb98d 100644 --- a/trunk/drivers/spi/spi-s3c64xx.c +++ b/trunk/drivers/spi/spi-s3c64xx.c @@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) } ret = pm_runtime_get_sync(&sdd->pdev->dev); - if (ret < 0) { + if (ret != 0) { dev_err(dev, "Failed to enable device: %d\n", ret); goto out_tx; } diff --git a/trunk/drivers/target/iscsi/iscsi_target_configfs.c b/trunk/drivers/target/iscsi/iscsi_target_configfs.c index 8d8b3ff68490..13e9e715ad2e 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_configfs.c +++ b/trunk/drivers/target/iscsi/iscsi_target_configfs.c @@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser( struct iscsi_tpg_np *tpg_np_iser = NULL; char *endptr; u32 op; - int rc = 0; + int rc; op = simple_strtoul(page, &endptr, 0); if ((op != 1) && (op != 0)) { @@ -174,32 +174,31 @@ static ssize_t lio_target_np_store_iser( return -EINVAL; if (op) { - rc = request_module("ib_isert"); - if (rc != 0) { + int rc = request_module("ib_isert"); + if (rc != 0) pr_warn("Unable to request_module for ib_isert\n"); - rc = 0; - } tpg_np_iser = 
iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, np->np_ip, tpg_np, ISCSI_INFINIBAND); - if (IS_ERR(tpg_np_iser)) { - rc = PTR_ERR(tpg_np_iser); + if (!tpg_np_iser || IS_ERR(tpg_np_iser)) goto out; - } } else { tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); - if (tpg_np_iser) { - rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); - if (rc < 0) - goto out; - } + if (!tpg_np_iser) + goto out; + + rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); + if (rc < 0) + goto out; } + printk("lio_target_np_store_iser() done, op: %d\n", op); + iscsit_put_tpg(tpg); return count; out: iscsit_put_tpg(tpg); - return rc; + return -EINVAL; } TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl0.c b/trunk/drivers/target/iscsi/iscsi_target_erl0.c index dcb199da06b9..8e6298cc8839 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_erl0.c +++ b/trunk/drivers/target/iscsi/iscsi_target_erl0.c @@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) return 0; sess->time2retain_timer_flags |= ISCSI_TF_STOP; - spin_unlock(&se_tpg->session_lock); + spin_unlock_bh(&se_tpg->session_lock); del_timer_sync(&sess->time2retain_timer); - spin_lock(&se_tpg->session_lock); + spin_lock_bh(&se_tpg->session_lock); sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; pr_debug("Stopped Time2Retain Timer for SID: %u\n", sess->sid); diff --git a/trunk/drivers/target/iscsi/iscsi_target_login.c b/trunk/drivers/target/iscsi/iscsi_target_login.c index 3402241be87c..bb5d5c5bce65 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_login.c +++ b/trunk/drivers/target/iscsi/iscsi_target_login.c @@ -984,6 +984,8 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; + printk("Set np->np_transport to %p -> %s\n", np->np_transport, + np->np_transport->name); return 0; } @@ -1000,6 +1002,7 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) conn->sock = new_sock; conn->login_family = np->np_sockaddr.ss_family; + printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); if (np->np_sockaddr.ss_family == AF_INET6) { memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); diff --git a/trunk/drivers/target/iscsi/iscsi_target_nego.c b/trunk/drivers/target/iscsi/iscsi_target_nego.c index cd5018ff9cd7..7ad912060e21 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_nego.c +++ b/trunk/drivers/target/iscsi/iscsi_target_nego.c @@ -721,6 +721,9 @@ int iscsi_target_locate_portal( start += strlen(key) + strlen(value) + 2; } + + printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); + /* * See 5.3. Login Phase. 
*/ diff --git a/trunk/drivers/tty/pty.c b/trunk/drivers/tty/pty.c index abfd99089781..59bfaecc4e14 100644 --- a/trunk/drivers/tty/pty.c +++ b/trunk/drivers/tty/pty.c @@ -244,9 +244,14 @@ static void pty_flush_buffer(struct tty_struct *tty) static int pty_open(struct tty_struct *tty, struct file *filp) { + int retval = -ENODEV; + if (!tty || !tty->link) - return -ENODEV; + goto out; + + set_bit(TTY_IO_ERROR, &tty->flags); + retval = -EIO; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) @@ -257,11 +262,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp) clear_bit(TTY_IO_ERROR, &tty->flags); clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); - return 0; - + retval = 0; out: - set_bit(TTY_IO_ERROR, &tty->flags); - return -EIO; + return retval; } static void pty_set_termios(struct tty_struct *tty, diff --git a/trunk/drivers/tty/serial/8250/8250_gsc.c b/trunk/drivers/tty/serial/8250/8250_gsc.c index bb91b4713ebd..097dff9c08ad 100644 --- a/trunk/drivers/tty/serial/8250/8250_gsc.c +++ b/trunk/drivers/tty/serial/8250/8250_gsc.c @@ -30,12 +30,6 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT - extern int iosapic_serial_irq(int cellnum); - if (!dev->irq && (dev->id.sversion == 0xad)) - dev->irq = iosapic_serial_irq(dev->mod_index-1); -#endif - if (!dev->irq) { /* We find some unattached serial ports by walking native * busses. These should be silently ignored. Otherwise, @@ -57,8 +51,7 @@ static int __init serial_init_chip(struct parisc_device *dev) memset(&uart, 0, sizeof(uart)); uart.port.iotype = UPIO_MEM; /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ - uart.port.uartclk = (dev->id.sversion != 0xad) ? - 7272727 : 1843200; + uart.port.uartclk = 7272727; uart.port.mapbase = address; uart.port.membase = ioremap_nocache(address, 16); uart.port.irq = dev->irq; @@ -80,7 +73,6 @@ static struct parisc_device_id serial_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, - { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, { 0 } }; diff --git a/trunk/drivers/tty/vt/vt_ioctl.c b/trunk/drivers/tty/vt/vt_ioctl.c index 2bd78e2ac8ec..fc2c06c66e89 100644 --- a/trunk/drivers/tty/vt/vt_ioctl.c +++ b/trunk/drivers/tty/vt/vt_ioctl.c @@ -289,10 +289,13 @@ static int vt_disallocate(unsigned int vc_num) struct vc_data *vc = NULL; int ret = 0; + if (!vc_num) + return 0; + console_lock(); if (VT_BUSY(vc_num)) ret = -EBUSY; - else if (vc_num) + else vc = vc_deallocate(vc_num); console_unlock(); diff --git a/trunk/drivers/usb/phy/Kconfig b/trunk/drivers/usb/phy/Kconfig index 2311b1e4e43c..7ef3eb8617a6 100644 --- a/trunk/drivers/usb/phy/Kconfig +++ b/trunk/drivers/usb/phy/Kconfig @@ -4,17 +4,11 @@ menuconfig USB_PHY bool "USB Physical Layer drivers" help - Most USB controllers have the physical layer signalling part - (commonly called a PHY) built in. However, dual-role devices - (a.k.a. USB on-the-go) which support being USB master or slave - with the same connector often use an external PHY. + USB controllers (those which are host, device or DRD) need a + device to handle the physical layer signalling, commonly called + a PHY. - The drivers in this submenu add support for such PHY devices. 
- They are not needed for standard master-only (or the vast - majority of slave-only) USB interfaces. - - If you're not sure if this applies to you, it probably doesn't; - say N here. + The following drivers add support for such PHY devices. if USB_PHY diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.c b/trunk/drivers/usb/serial/ti_usb_3410_5052.c index e581c2549a57..c92c5ed4e580 100644 --- a/trunk/drivers/usb/serial/ti_usb_3410_5052.c +++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.c @@ -172,8 +172,7 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.h b/trunk/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..b353e7e3d480 100644 --- a/trunk/drivers/usb/serial/ti_usb_3410_5052.h +++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.h @@ -52,9 +52,7 @@ /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 -#define ABBOTT_STEREO_PLUG_ID 0x3410 -#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID -#define ABBOTT_STRIP_PORT_ID 0x3420 +#define ABBOTT_PRODUCT_ID 0x3410 /* Commands */ #define TI_GET_VERSION 0x01 diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index 35f281033142..e570081f9f76 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -2470,16 +2470,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, .mode = mode }; int err; - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || - (mode & FALLOC_FL_PUNCH_HOLE); if (fc->no_fallocate) return -EOPNOTSUPP; - if (lock_inode) { + if (mode & FALLOC_FL_PUNCH_HOLE) { mutex_lock(&inode->i_mutex); - if (mode & FALLOC_FL_PUNCH_HOLE) - fuse_set_nowrite(inode); + fuse_set_nowrite(inode); } req = fuse_get_req_nopages(fc); @@ -2514,9 +2511,8 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, fuse_invalidate_attr(inode); out: - if (lock_inode) { - if (mode & FALLOC_FL_PUNCH_HOLE) - fuse_release_nowrite(inode); + if (mode & FALLOC_FL_PUNCH_HOLE) { + fuse_release_nowrite(inode); mutex_unlock(&inode->i_mutex); } diff --git a/trunk/fs/internal.h b/trunk/fs/internal.h index 68121584ae37..eaa75f75b625 100644 --- a/trunk/fs/internal.h +++ b/trunk/fs/internal.h @@ -131,12 +131,6 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); */ extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); -/* - * splice.c - */ -extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags); - /* * pipe.c */ diff --git a/trunk/fs/read_write.c b/trunk/fs/read_write.c index 2cefa417be34..03430008704e 100644 --- a/trunk/fs/read_write.c +++ b/trunk/fs/read_write.c @@ -1064,7 +1064,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, struct fd in, out; struct inode *in_inode, *out_inode; loff_t pos; - loff_t out_pos; ssize_t retval; int fl; @@ -1078,14 +1077,12 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!(in.file->f_mode & FMODE_READ)) goto fput_in; retval = -ESPIPE; - if (!ppos) { - pos = in.file->f_pos; - } else { - pos = *ppos; + if (!ppos) + ppos = &in.file->f_pos; + else 
if (!(in.file->f_mode & FMODE_PREAD)) goto fput_in; - } - retval = rw_verify_area(READ, in.file, &pos, count); + retval = rw_verify_area(READ, in.file, ppos, count); if (retval < 0) goto fput_in; count = retval; @@ -1102,8 +1099,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = -EINVAL; in_inode = file_inode(in.file); out_inode = file_inode(out.file); - out_pos = out.file->f_pos; - retval = rw_verify_area(WRITE, out.file, &out_pos, count); + retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); if (retval < 0) goto fput_out; count = retval; @@ -1111,6 +1107,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (!max) max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); + pos = *ppos; if (unlikely(pos + count > max)) { retval = -EOVERFLOW; if (pos >= max) @@ -1129,23 +1126,18 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, if (in.file->f_flags & O_NONBLOCK) fl = SPLICE_F_NONBLOCK; #endif - retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); + retval = do_splice_direct(in.file, ppos, out.file, count, fl); if (retval > 0) { add_rchar(current, retval); add_wchar(current, retval); fsnotify_access(in.file); fsnotify_modify(out.file); - out.file->f_pos = out_pos; - if (ppos) - *ppos = pos; - else - in.file->f_pos = pos; } inc_syscr(current); inc_syscw(current); - if (pos > max) + if (*ppos > max) retval = -EOVERFLOW; fput_out: diff --git a/trunk/fs/splice.c b/trunk/fs/splice.c index d37431dd60a1..e6b25598c8c4 100644 --- a/trunk/fs/splice.c +++ b/trunk/fs/splice.c @@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, { struct file *file = sd->u.file; - return do_splice_from(pipe, file, sd->opos, sd->total_len, + return do_splice_from(pipe, file, &file->f_pos, sd->total_len, sd->flags); } @@ -1283,7 +1283,6 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * @in: file to splice from * @ppos: input file offset * @out: file to splice to - * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * @@ -1295,7 +1294,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, * */ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, - loff_t *opos, size_t len, unsigned int flags) + size_t len, unsigned int flags) { struct splice_desc sd = { .len = len, @@ -1303,7 +1302,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, .flags = flags, .pos = *ppos, .u.file = out, - .opos = opos, }; long ret; @@ -1327,7 +1325,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; - loff_t offset; + loff_t offset, *off; long ret; ipipe = get_pipe_info(in); @@ -1358,15 +1356,13 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; - } else { - offset = out->f_pos; - } + off = &offset; + } else + off = &out->f_pos; - ret = do_splice_from(ipipe, out, &offset, len, flags); + ret = do_splice_from(ipipe, out, off, len, flags); - if (!off_out) - out->f_pos = offset; - else if (copy_to_user(off_out, &offset, sizeof(loff_t))) + if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) ret = -EFAULT; return ret; @@ -1380,15 +1376,13 @@ static long do_splice(struct file *in, loff_t __user *off_in, return -EINVAL; if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; - } else { - offset = in->f_pos; - } + off = 
&offset; + } else + off = &in->f_pos; - ret = do_splice_to(in, &offset, opipe, len, flags); + ret = do_splice_to(in, off, opipe, len, flags); - if (!off_in) - in->f_pos = offset; - else if (copy_to_user(off_in, &offset, sizeof(loff_t))) + if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) ret = -EFAULT; return ret; diff --git a/trunk/include/acpi/acpi_bus.h b/trunk/include/acpi/acpi_bus.h index c13c919ab99e..636c59f2003a 100644 --- a/trunk/include/acpi/acpi_bus.h +++ b/trunk/include/acpi/acpi_bus.h @@ -382,7 +382,6 @@ const char *acpi_power_state_string(int state); int acpi_device_get_power(struct acpi_device *device, int *state); int acpi_device_set_power(struct acpi_device *device, int state); int acpi_bus_init_power(struct acpi_device *device); -int acpi_device_fix_up_power(struct acpi_device *device); int acpi_bus_update_power(acpi_handle handle, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); diff --git a/trunk/include/linux/context_tracking.h b/trunk/include/linux/context_tracking.h index fc09d7b0dacf..365f4a61bf04 100644 --- a/trunk/include/linux/context_tracking.h +++ b/trunk/include/linux/context_tracking.h @@ -3,7 +3,6 @@ #include #include -#include #include struct context_tracking { @@ -20,26 +19,6 @@ struct context_tracking { } state; }; -static inline void __guest_enter(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. - */ - vtime_account_system(current); - current->flags |= PF_VCPU; -} - -static inline void __guest_exit(void) -{ - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. - */ - vtime_account_system(current); - current->flags &= ~PF_VCPU; -} - #ifdef CONFIG_CONTEXT_TRACKING DECLARE_PER_CPU(struct context_tracking, context_tracking); @@ -56,9 +35,6 @@ static inline bool context_tracking_active(void) extern void user_enter(void); extern void user_exit(void); -extern void guest_enter(void); -extern void guest_exit(void); - static inline enum ctx_state exception_enter(void) { enum ctx_state prev_ctx; @@ -81,17 +57,6 @@ extern void context_tracking_task_switch(struct task_struct *prev, static inline bool context_tracking_in_user(void) { return false; } static inline void user_enter(void) { } static inline void user_exit(void) { } - -static inline void guest_enter(void) -{ - __guest_enter(); -} - -static inline void guest_exit(void) -{ - __guest_exit(); -} - static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline void context_tracking_task_switch(struct task_struct *prev, diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index 65c2be22b601..43db02e9c9fa 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -2414,6 +2414,8 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + size_t len, unsigned int flags); extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); diff --git a/trunk/include/linux/kvm_host.h b/trunk/include/linux/kvm_host.h index 8db53cfaccdb..f0eea07d2c2b 100644 --- a/trunk/include/linux/kvm_host.h +++ b/trunk/include/linux/kvm_host.h @@ -23,7 +23,6 @@ #include 
#include #include -#include #include #include @@ -761,6 +760,42 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) } #endif +static inline void __guest_enter(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags |= PF_VCPU; +} + +static inline void __guest_exit(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} + +#ifdef CONFIG_CONTEXT_TRACKING +extern void guest_enter(void); +extern void guest_exit(void); + +#else /* !CONFIG_CONTEXT_TRACKING */ +static inline void guest_enter(void) +{ + __guest_enter(); +} + +static inline void guest_exit(void) +{ + __guest_exit(); +} +#endif /* !CONFIG_CONTEXT_TRACKING */ + static inline void kvm_guest_enter(void) { unsigned long flags; diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index c5b6dbf9c2fc..f463a46424e2 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -389,7 +389,8 @@ struct perf_event { /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; - + int mmap_locked; + struct user_struct *mmap_user; struct ring_buffer *rb; struct list_head rb_entry; diff --git a/trunk/include/linux/preempt.h b/trunk/include/linux/preempt.h index f5d4723cdb3d..87a03c746f17 100644 --- a/trunk/include/linux/preempt.h +++ b/trunk/include/linux/preempt.h @@ -33,25 +33,9 @@ do { \ preempt_schedule(); \ } while (0) -#ifdef CONFIG_CONTEXT_TRACKING - -void preempt_schedule_context(void); - -#define preempt_check_resched_context() \ -do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ - preempt_schedule_context(); \ -} while (0) -#else - -#define preempt_check_resched_context() preempt_check_resched() - -#endif /* CONFIG_CONTEXT_TRACKING */ - #else /* !CONFIG_PREEMPT */ #define preempt_check_resched() do { } while (0) -#define preempt_check_resched_context() do { } while (0) #endif /* CONFIG_PREEMPT */ @@ -104,7 +88,7 @@ do { \ do { \ preempt_enable_no_resched_notrace(); \ barrier(); \ - preempt_check_resched_context(); \ + preempt_check_resched(); \ } while (0) #else /* !CONFIG_PREEMPT_COUNT */ diff --git a/trunk/include/linux/splice.h b/trunk/include/linux/splice.h index 74575cbf2d6f..09a545a7dfa3 100644 --- a/trunk/include/linux/splice.h +++ b/trunk/include/linux/splice.h @@ -35,7 +35,6 @@ struct splice_desc { void *data; /* cookie */ } u; loff_t pos; /* file position */ - loff_t *opos; /* sendfile: output position */ size_t num_spliced; /* number of bytes already spliced */ bool need_wakeup; /* need to wake up writer */ }; diff --git a/trunk/include/linux/vtime.h b/trunk/include/linux/vtime.h index b1dd2db80076..71a5782d8c59 100644 --- a/trunk/include/linux/vtime.h +++ b/trunk/include/linux/vtime.h @@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk) } extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); -extern void vtime_init_idle(struct task_struct *tsk, int cpu); +extern void vtime_init_idle(struct task_struct *tsk); #else static inline void vtime_account_irq_exit(struct task_struct *tsk) { @@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } static 
inline void vtime_guest_exit(struct task_struct *tsk) { } -static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } +static inline void vtime_init_idle(struct task_struct *tsk) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING diff --git a/trunk/kernel/context_tracking.c b/trunk/kernel/context_tracking.c index 383f8231e436..65349f07b878 100644 --- a/trunk/kernel/context_tracking.c +++ b/trunk/kernel/context_tracking.c @@ -15,6 +15,7 @@ */ #include +#include #include #include #include @@ -70,46 +71,6 @@ void user_enter(void) local_irq_restore(flags); } -#ifdef CONFIG_PREEMPT -/** - * preempt_schedule_context - preempt_schedule called by tracing - * - * The tracing infrastructure uses preempt_enable_notrace to prevent - * recursion and tracing preempt enabling caused by the tracing - * infrastructure itself. But as tracing can happen in areas coming - * from userspace or just about to enter userspace, a preempt enable - * can occur before user_exit() is called. This will cause the scheduler - * to be called when the system is still in usermode. - * - * To prevent this, the preempt_enable_notrace will use this function - * instead of preempt_schedule() to exit user context if needed before - * calling the scheduler. - */ -void __sched notrace preempt_schedule_context(void) -{ - struct thread_info *ti = current_thread_info(); - enum ctx_state prev_ctx; - - if (likely(ti->preempt_count || irqs_disabled())) - return; - - /* - * Need to disable preemption in case user_exit() is traced - * and the tracer calls preempt_enable_notrace() causing - * an infinite recursion. - */ - preempt_disable_notrace(); - prev_ctx = exception_enter(); - preempt_enable_no_resched_notrace(); - - preempt_schedule(); - - preempt_disable_notrace(); - exception_exit(prev_ctx); - preempt_enable_notrace(); -} -EXPORT_SYMBOL_GPL(preempt_schedule_context); -#endif /* CONFIG_PREEMPT */ /** * user_exit - Inform the context tracking that the CPU is diff --git a/trunk/kernel/cpu/idle.c b/trunk/kernel/cpu/idle.c index e695c0a0bcb5..d5585f5e038e 100644 --- a/trunk/kernel/cpu/idle.c +++ b/trunk/kernel/cpu/idle.c @@ -5,7 +5,6 @@ #include #include #include -#include #include @@ -59,7 +58,6 @@ void __weak arch_cpu_idle_dead(void) { } void __weak arch_cpu_idle(void) { cpu_idle_force_poll = 1; - local_irq_enable(); } /* @@ -114,21 +112,6 @@ static void cpu_idle_loop(void) void cpu_startup_entry(enum cpuhp_state state) { - /* - * This #ifdef needs to die, but it's too late in the cycle to - * make this generic (arm and sh have never invoked the canary - * init for the non boot cpus!). Will be fixed in 3.11 - */ -#ifdef CONFIG_X86 - /* - * If we're the non-boot CPU, nothing set the stack canary up - * for us. The boot CPU already has it initialized but no harm - * in doing it again. This is a good place for updating it, as - * we wont ever return from this function (so the invalid - * canaries already on the stack wont ever trigger). 
- */ - boot_init_stack_canary(); -#endif current_set_polling(); arch_cpu_idle_prepare(); cpu_idle_loop(); diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index b391907d5352..9dc297faf7c0 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -196,6 +196,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, static void update_context_time(struct perf_event_context *ctx); static u64 perf_event_time(struct perf_event *event); +static void ring_buffer_attach(struct perf_event *event, + struct ring_buffer *rb); + void __weak perf_event_print_debug(void) { } extern __weak const char *perf_pmu_name(void) @@ -2915,7 +2918,6 @@ static void free_event_rcu(struct rcu_head *head) } static void ring_buffer_put(struct ring_buffer *rb); -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); static void free_event(struct perf_event *event) { @@ -2940,30 +2942,15 @@ static void free_event(struct perf_event *event) if (has_branch_stack(event)) { static_key_slow_dec_deferred(&perf_sched_events); /* is system-wide event */ - if (!(event->attach_state & PERF_ATTACH_TASK)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) atomic_dec(&per_cpu(perf_branch_stack_events, event->cpu)); - } } } if (event->rb) { - struct ring_buffer *rb; - - /* - * Can happen when we close an event with re-directed output. - * - * Since we have a 0 refcount, perf_mmap_close() will skip - * over us; possibly making our ring_buffer_put() the last. - */ - mutex_lock(&event->mmap_mutex); - rb = event->rb; - if (rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* could be last */ - } - mutex_unlock(&event->mmap_mutex); + ring_buffer_put(event->rb); + event->rb = NULL; } if (is_cgroup_event(event)) @@ -3201,13 +3188,30 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) unsigned int events = POLL_HUP; /* - * Pin the event->rb by taking event->mmap_mutex; otherwise - * perf_event_set_output() can swizzle our rb and make us miss wakeups. + * Race between perf_event_set_output() and perf_poll(): perf_poll() + * grabs the rb reference but perf_event_set_output() overrides it. 
+ * Here is the timeline for two threads T1, T2: + * t0: T1, rb = rcu_dereference(event->rb) + * t1: T2, old_rb = event->rb + * t2: T2, event->rb = new rb + * t3: T2, ring_buffer_detach(old_rb) + * t4: T1, ring_buffer_attach(rb1) + * t5: T1, poll_wait(event->waitq) + * + * To avoid this problem, we grab mmap_mutex in perf_poll() + * thereby ensuring that the assignment of the new ring buffer + * and the detachment of the old buffer appear atomic to perf_poll() */ mutex_lock(&event->mmap_mutex); - rb = event->rb; - if (rb) + + rcu_read_lock(); + rb = rcu_dereference(event->rb); + if (rb) { + ring_buffer_attach(event, rb); events = atomic_xchg(&rb->poll, 0); + } + rcu_read_unlock(); + mutex_unlock(&event->mmap_mutex); poll_wait(file, &event->waitq, wait); @@ -3517,12 +3521,16 @@ static void ring_buffer_attach(struct perf_event *event, return; spin_lock_irqsave(&rb->event_lock, flags); - if (list_empty(&event->rb_entry)) - list_add(&event->rb_entry, &rb->event_list); + if (!list_empty(&event->rb_entry)) + goto unlock; + + list_add(&event->rb_entry, &rb->event_list); +unlock: spin_unlock_irqrestore(&rb->event_lock, flags); } -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) +static void ring_buffer_detach(struct perf_event *event, + struct ring_buffer *rb) { unsigned long flags; @@ -3541,10 +3549,13 @@ static void ring_buffer_wakeup(struct perf_event *event) rcu_read_lock(); rb = rcu_dereference(event->rb); - if (rb) { - list_for_each_entry_rcu(event, &rb->event_list, rb_entry) - wake_up_all(&event->waitq); - } + if (!rb) + goto unlock; + + list_for_each_entry_rcu(event, &rb->event_list, rb_entry) + wake_up_all(&event->waitq); + +unlock: rcu_read_unlock(); } @@ -3573,10 +3584,18 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event) static void ring_buffer_put(struct ring_buffer *rb) { + struct perf_event *event, *n; + unsigned long flags; + if (!atomic_dec_and_test(&rb->refcount)) return; - WARN_ON_ONCE(!list_empty(&rb->event_list)); + spin_lock_irqsave(&rb->event_lock, flags); + list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { + list_del_init(&event->rb_entry); + wake_up_all(&event->waitq); + } + spin_unlock_irqrestore(&rb->event_lock, flags); call_rcu(&rb->rcu_head, rb_free_rcu); } @@ -3586,100 +3605,26 @@ static void perf_mmap_open(struct vm_area_struct *vma) struct perf_event *event = vma->vm_file->private_data; atomic_inc(&event->mmap_count); - atomic_inc(&event->rb->mmap_count); } -/* - * A buffer can be mmap()ed multiple times; either directly through the same - * event, or through other events by use of perf_event_set_output(). - * - * In order to undo the VM accounting done by perf_mmap() we need to destroy - * the buffer here, where we still have a VM context. This means we need - * to detach all events redirecting to us. - */ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; - struct ring_buffer *rb = event->rb; - struct user_struct *mmap_user = rb->mmap_user; - int mmap_locked = rb->mmap_locked; - unsigned long size = perf_data_size(rb); - - atomic_dec(&rb->mmap_count); - - if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) - return; + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { + unsigned long size = perf_data_size(event->rb); + struct user_struct *user = event->mmap_user; + struct ring_buffer *rb = event->rb; - /* Detach current event from the buffer. 
*/ - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - mutex_unlock(&event->mmap_mutex); - - /* If there's still other mmap()s of this buffer, we're done. */ - if (atomic_read(&rb->mmap_count)) { - ring_buffer_put(rb); /* can't be last */ - return; - } - - /* - * No other mmap()s, detach from all other events that might redirect - * into the now unreachable buffer. Somewhat complicated by the - * fact that rb::event_lock otherwise nests inside mmap_mutex. - */ -again: - rcu_read_lock(); - list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { - if (!atomic_long_inc_not_zero(&event->refcount)) { - /* - * This event is en-route to free_event() which will - * detach it and remove it from the list. - */ - continue; - } - rcu_read_unlock(); - - mutex_lock(&event->mmap_mutex); - /* - * Check we didn't race with perf_event_set_output() which can - * swizzle the rb from under us while we were waiting to - * acquire mmap_mutex. - * - * If we find a different rb; ignore this event, a next - * iteration will no longer find it on the list. We have to - * still restart the iteration to make sure we're not now - * iterating the wrong list. - */ - if (event->rb == rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* can't be last, we still have one */ - } + atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); + vma->vm_mm->pinned_vm -= event->mmap_locked; + rcu_assign_pointer(event->rb, NULL); + ring_buffer_detach(event, rb); mutex_unlock(&event->mmap_mutex); - put_event(event); - /* - * Restart the iteration; either we're on the wrong list or - * destroyed its integrity by doing a deletion. - */ - goto again; + ring_buffer_put(rb); + free_uid(user); } - rcu_read_unlock(); - - /* - * It could be there's still a few 0-ref events on the list; they'll - * get cleaned up by free_event() -- they'll also still have their - * ref on the rb and will free it whenever they are done with it. - * - * Aside from that, this buffer is 'fully' detached and unmapped, - * undo the VM accounting. - */ - - atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); - vma->vm_mm->pinned_vm -= mmap_locked; - free_uid(mmap_user); - - ring_buffer_put(rb); /* could be last */ } static const struct vm_operations_struct perf_mmap_vmops = { @@ -3729,24 +3674,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) return -EINVAL; WARN_ON_ONCE(event->ctx->parent_ctx); -again: mutex_lock(&event->mmap_mutex); if (event->rb) { - if (event->rb->nr_pages != nr_pages) { + if (event->rb->nr_pages == nr_pages) + atomic_inc(&event->rb->refcount); + else ret = -EINVAL; - goto unlock; - } - - if (!atomic_inc_not_zero(&event->rb->mmap_count)) { - /* - * Raced against perf_mmap_close() through - * perf_event_set_output(). Try again, hope for better - * luck. 
- */ - mutex_unlock(&event->mmap_mutex); - goto again; - } - goto unlock; } @@ -3787,16 +3720,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) ret = -ENOMEM; goto unlock; } - - atomic_set(&rb->mmap_count, 1); - rb->mmap_locked = extra; - rb->mmap_user = get_current_user(); + rcu_assign_pointer(event->rb, rb); atomic_long_add(user_extra, &user->locked_vm); - vma->vm_mm->pinned_vm += extra; - - ring_buffer_attach(event, rb); - rcu_assign_pointer(event->rb, rb); + event->mmap_locked = extra; + event->mmap_user = get_current_user(); + vma->vm_mm->pinned_vm += event->mmap_locked; perf_event_update_userpage(event); @@ -3805,11 +3734,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) atomic_inc(&event->mmap_count); mutex_unlock(&event->mmap_mutex); - /* - * Since pinned accounting is per vm we cannot allow fork() to copy our - * vma. - */ - vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &perf_mmap_vmops; return ret; @@ -6487,8 +6412,6 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) if (atomic_read(&event->mmap_count)) goto unlock; - old_rb = event->rb; - if (output_event) { /* get the rb we want to redirect to */ rb = ring_buffer_get(output_event); @@ -6496,28 +6419,16 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) goto unlock; } + old_rb = event->rb; + rcu_assign_pointer(event->rb, rb); if (old_rb) ring_buffer_detach(event, old_rb); - - if (rb) - ring_buffer_attach(event, rb); - - rcu_assign_pointer(event->rb, rb); - - if (old_rb) { - ring_buffer_put(old_rb); - /* - * Since we detached before setting the new rb, so that we - * could attach the new rb, we could have missed a wakeup. - * Provide it now. - */ - wake_up_all(&event->waitq); - } - ret = 0; unlock: mutex_unlock(&event->mmap_mutex); + if (old_rb) + ring_buffer_put(old_rb); out: return ret; } diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h index ca6599723be5..eb675c4d59df 100644 --- a/trunk/kernel/events/internal.h +++ b/trunk/kernel/events/internal.h @@ -31,10 +31,6 @@ struct ring_buffer { spinlock_t event_lock; struct list_head event_list; - atomic_t mmap_count; - unsigned long mmap_locked; - struct user_struct *mmap_user; - struct perf_event_mmap_page *user_page; void *data_pages[0]; }; diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c index bddf3b201a48..3fed7f0cbcdf 100644 --- a/trunk/kernel/kprobes.c +++ b/trunk/kernel/kprobes.c @@ -467,7 +467,6 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) /* Optimization staging list, protected by kprobe_mutex */ static LIST_HEAD(optimizing_list); static LIST_HEAD(unoptimizing_list); -static LIST_HEAD(freeing_list); static void kprobe_optimizer(struct work_struct *work); static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); @@ -505,7 +504,7 @@ static __kprobes void do_optimize_kprobes(void) * Unoptimize (replace a jump with a breakpoint and remove the breakpoint * if need) kprobes listed on unoptimizing_list. 
*/ -static __kprobes void do_unoptimize_kprobes(void) +static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) { struct optimized_kprobe *op, *tmp; @@ -516,9 +515,9 @@ static __kprobes void do_unoptimize_kprobes(void) /* Ditto to do_optimize_kprobes */ get_online_cpus(); mutex_lock(&text_mutex); - arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); + arch_unoptimize_kprobes(&unoptimizing_list, free_list); /* Loop free_list for disarming */ - list_for_each_entry_safe(op, tmp, &freeing_list, list) { + list_for_each_entry_safe(op, tmp, free_list, list) { /* Disarm probes if marked disabled */ if (kprobe_disabled(&op->kp)) arch_disarm_kprobe(&op->kp); @@ -537,11 +536,11 @@ static __kprobes void do_unoptimize_kprobes(void) } /* Reclaim all kprobes on the free_list */ -static __kprobes void do_free_cleaned_kprobes(void) +static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) { struct optimized_kprobe *op, *tmp; - list_for_each_entry_safe(op, tmp, &freeing_list, list) { + list_for_each_entry_safe(op, tmp, free_list, list) { BUG_ON(!kprobe_unused(&op->kp)); list_del_init(&op->list); free_aggr_kprobe(&op->kp); @@ -557,6 +556,8 @@ static __kprobes void kick_kprobe_optimizer(void) /* Kprobe jump optimizer */ static __kprobes void kprobe_optimizer(struct work_struct *work) { + LIST_HEAD(free_list); + mutex_lock(&kprobe_mutex); /* Lock modules while optimizing kprobes */ mutex_lock(&module_mutex); @@ -565,7 +566,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) * kprobes before waiting for quiesence period. */ - do_unoptimize_kprobes(); + do_unoptimize_kprobes(&free_list); /* * Step 2: Wait for quiesence period to ensure all running interrupts @@ -580,7 +581,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) do_optimize_kprobes(); /* Step 4: Free cleaned kprobes after quiesence period */ - do_free_cleaned_kprobes(); + do_free_cleaned_kprobes(&free_list); mutex_unlock(&module_mutex); mutex_unlock(&kprobe_mutex); @@ -722,19 +723,8 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p) if (!list_empty(&op->list)) /* Dequeue from the (un)optimization queue */ list_del_init(&op->list); - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - - if (kprobe_unused(p)) { - /* Enqueue if it is unused */ - list_add(&op->list, &freeing_list); - /* - * Remove unused probes from the hash list. After waiting - * for synchronization, this probe is reclaimed. - * (reclaiming is done by do_free_cleaned_kprobes().) - */ - hlist_del_rcu(&op->kp.hlist); - } + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; /* Don't touch the code, because it is already freed. 
 	arch_remove_optimized_kprobe(op);
 }
diff --git a/trunk/kernel/range.c b/trunk/kernel/range.c
index 322ea8e93e4b..eb911dbce267 100644
--- a/trunk/kernel/range.c
+++ b/trunk/kernel/range.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sort.h>
-#include <linux/string.h>
+
 #include <linux/range.h>
 
 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,8 +32,9 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
 	if (start >= end)
 		return nr_range;
 
-	/* get new start/end: */
+	/* Try to merge it with old one: */
 	for (i = 0; i < nr_range; i++) {
+		u64 final_start, final_end;
 		u64 common_start, common_end;
 
 		if (!range[i].end)
@@ -44,16 +45,14 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
 		if (common_start > common_end)
 			continue;
 
-		/* new start/end, will add it back at last */
-		start = min(range[i].start, start);
-		end = max(range[i].end, end);
+		final_start = min(range[i].start, start);
+		final_end = max(range[i].end, end);
 
-		memmove(&range[i], &range[i + 1],
-			(nr_range - (i + 1)) * sizeof(range[i]));
-		range[nr_range - 1].start = 0;
-		range[nr_range - 1].end = 0;
-		nr_range--;
-		i--;
+		/* clear it and add it back for further merge */
+		range[i].start = 0;
+		range[i].end = 0;
+		return add_range_with_merge(range, az, nr_range,
+					    final_start, final_end);
 	}
 
 	/* Need to add it: */
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index e8b335016c52..58453b8272fd 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -633,19 +633,7 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
-
-	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
-		return false;
-
-	if (idle_cpu(cpu) && !need_resched())
-		return true;
-
-	/*
-	 * We can't run Idle Load Balance on this CPU for this time so we
-	 * cancel it and clear NOHZ_BALANCE_KICK
-	 */
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
-	return false;
+	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1405,9 +1393,8 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list)
-			&& !tick_nohz_full_cpu(smp_processor_id())
-			&& !got_nohz_idle_kick())
+	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
+			&& !tick_nohz_full_cpu(smp_processor_id()))
 		return;
 
 	/*
@@ -1430,7 +1417,7 @@ void scheduler_ipi(void)
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick())) {
+	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
@@ -4758,7 +4745,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
-	vtime_init_idle(idle, cpu);
+	vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c
index b5ccba22603b..cc2dc3eea8a3 100644
--- a/trunk/kernel/sched/cputime.c
+++ b/trunk/kernel/sched/cputime.c
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
 
 	write_seqlock(&current->vtime_seqlock);
 	current->vtime_snap_whence = VTIME_SYS;
-	current->vtime_snap = sched_clock_cpu(smp_processor_id());
+	current->vtime_snap = sched_clock();
 	write_sequnlock(&current->vtime_seqlock);
 }
 
-void vtime_init_idle(struct task_struct *t, int cpu)
+void vtime_init_idle(struct task_struct *t)
 {
 	unsigned long flags;
 
 	write_seqlock_irqsave(&t->vtime_seqlock, flags);
 	t->vtime_snap_whence = VTIME_SYS;
-	t->vtime_snap = sched_clock_cpu(cpu);
+	t->vtime_snap = sched_clock();
 	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
 
diff --git a/trunk/kernel/time/tick-broadcast.c b/trunk/kernel/time/tick-broadcast.c
index b4c245580b79..0c739423b0f9 100644
--- a/trunk/kernel/time/tick-broadcast.c
+++ b/trunk/kernel/time/tick-broadcast.c
@@ -698,6 +698,10 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
 
+		/* Take the do_timer update */
+		if (!tick_nohz_full_cpu(cpu))
+			tick_do_timer_cpu = cpu;
+
 		/*
 		 * We must be careful here. There might be other CPUs
 		 * waiting for periodic broadcast. We need to set the
diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c
index 0cf1c1453181..f4208138fbf4 100644
--- a/trunk/kernel/time/tick-sched.c
+++ b/trunk/kernel/time/tick-sched.c
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 		 * we can't safely shutdown that CPU.
 		 */
 		if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-			return NOTIFY_BAD;
+			return -EINVAL;
 		break;
 	}
 	return NOTIFY_OK;
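
Note on the kernel/range.c change above: the restored code merges by clearing the overlapping slot and recursively re-inserting the widened range, rather than compacting the array with memmove(). Below is a minimal, self-contained userspace sketch of that strategy, not part of the patch; the simplified add_range() helper, the overlap test, and the main() driver are illustrative assumptions, not the kernel implementation.

/* range_merge_sketch.c - illustrative only, not from the patch above. */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/* Append [start, end) into the next slot (simplified stand-in). */
static int add_range(struct range *range, int az, int nr_range,
		     uint64_t start, uint64_t end)
{
	if (start >= end || nr_range >= az)
		return nr_range;
	range[nr_range].start = start;
	range[nr_range].end = end;
	return nr_range + 1;
}

/* Merge by clearing the overlapping slot and re-adding the widened range. */
static int add_range_with_merge(struct range *range, int az, int nr_range,
				uint64_t start, uint64_t end)
{
	int i;

	if (start >= end)
		return nr_range;

	for (i = 0; i < nr_range; i++) {
		uint64_t final_start, final_end;

		if (!range[i].end)
			continue;			/* cleared slot */
		if (range[i].start > end || range[i].end < start)
			continue;			/* no overlap */

		final_start = range[i].start < start ? range[i].start : start;
		final_end = range[i].end > end ? range[i].end : end;

		/* clear it and add it back for further merge */
		range[i].start = 0;
		range[i].end = 0;
		return add_range_with_merge(range, az, nr_range,
					    final_start, final_end);
	}

	return add_range(range, az, nr_range, start, end);
}

int main(void)
{
	struct range r[8] = { { 0, 0 } };
	int i, n = 0;

	n = add_range_with_merge(r, 8, n, 10, 20);
	n = add_range_with_merge(r, 8, n, 30, 40);
	n = add_range_with_merge(r, 8, n, 15, 35);	/* bridges both */

	for (i = 0; i < n; i++)
		if (r[i].end)
			printf("[%llu, %llu)\n",
			       (unsigned long long)r[i].start,
			       (unsigned long long)r[i].end);
	return 0;	/* prints: [10, 40) */
}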