diff --git a/[refs] b/[refs]
index 3b91464fd98f..7ef45fb0d1d5 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a2b49102daac7a1d90dc01bfc4350ef68aa1204d
+refs/heads/master: 41bc3186b3c92a4ca05e2aa14bb6272fb491e679
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 3146ed3f6eca..3269576dbfa8 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1283,20 +1283,6 @@ config ARM_ERRATA_364296
 	  processor into full low interrupt latency mode. ARM11MPCore
 	  is not affected.
 
-config ARM_ERRATA_764369
-	bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
-	depends on CPU_V7 && SMP
-	help
-	  This option enables the workaround for erratum 764369
-	  affecting Cortex-A9 MPCore with two or more processors (all
-	  current revisions). Under certain timing circumstances, a data
-	  cache line maintenance operation by MVA targeting an Inner
-	  Shareable memory region may fail to proceed up to either the
-	  Point of Coherency or to the Point of Unification of the
-	  system. This workaround adds a DSB instruction before the
-	  relevant cache maintenance functions and sets a specific bit
-	  in the diagnostic control register of the SCU.
-
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/trunk/arch/arm/include/asm/futex.h b/trunk/arch/arm/include/asm/futex.h
index 253cc86318bf..8c73900da9ed 100644
--- a/trunk/arch/arm/include/asm/futex.h
+++ b/trunk/arch/arm/include/asm/futex.h
@@ -25,17 +25,17 @@
 
 #ifdef CONFIG_SMP
 
-#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 	smp_mb();	\
 	__asm__ __volatile__(	\
-	"1: ldrex %1, [%3]\n"	\
+	"1: ldrex %1, [%2]\n"	\
 	" " insn "\n"	\
-	"2: strex %2, %0, [%3]\n"	\
-	" teq %2, #0\n"	\
+	"2: strex %1, %0, [%2]\n"	\
+	" teq %1, #0\n"	\
 	" bne 1b\n"	\
 	" mov %0, #0\n"	\
-	__futex_atomic_ex_table("%5")	\
-	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)	\
+	__futex_atomic_ex_table("%4")	\
+	: "=&r" (ret), "=&r" (oldval)	\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)	\
 	: "cc", "memory")
 
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 #include
 #include
 
-#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 	__asm__ __volatile__(	\
-	"1: " T(ldr) " %1, [%3]\n"	\
+	"1: " T(ldr) " %1, [%2]\n"	\
 	" " insn "\n"	\
-	"2: " T(str) " %0, [%3]\n"	\
+	"2: " T(str) " %0, [%2]\n"	\
 	" mov %0, #0\n"	\
-	__futex_atomic_ex_table("%5")	\
-	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)	\
+	__futex_atomic_ex_table("%4")	\
+	: "=&r" (ret), "=&r" (oldval)	\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)	\
 	: "cc", "memory")
 
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, ret, tmp;
+	int oldval = 0, ret;
 
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
+		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+		__futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
+		__futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
+		__futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
 		break;
 	default:
 		ret = -ENOSYS;
diff --git a/trunk/arch/arm/include/asm/unistd.h b/trunk/arch/arm/include/asm/unistd.h
index c60a2944f95b..2c04ed5efeb5 100644
--- a/trunk/arch/arm/include/asm/unistd.h
+++ b/trunk/arch/arm/include/asm/unistd.h
@@ -478,8 +478,8 @@
 /*
  * Unimplemented (or alternatively implemented) syscalls
  */
-#define __IGNORE_fadvise64_64
-#define __IGNORE_migrate_pages
+#define __IGNORE_fadvise64_64 1
+#define __IGNORE_migrate_pages 1
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/trunk/arch/arm/kernel/smp_scu.c b/trunk/arch/arm/kernel/smp_scu.c
index 7fcddb75c877..79ed5e7f204a 100644
--- a/trunk/arch/arm/kernel/smp_scu.c
+++ b/trunk/arch/arm/kernel/smp_scu.c
@@ -13,7 +13,6 @@
 
 #include
 #include
-#include
 
 #define SCU_CTRL 0x00
 #define SCU_CONFIG 0x04
@@ -38,15 +37,6 @@ void __init scu_enable(void __iomem *scu_base)
 {
 	u32 scu_ctrl;
 
-#ifdef CONFIG_ARM_ERRATA_764369
-	/* Cortex-A9 only */
-	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
-		scu_ctrl = __raw_readl(scu_base + 0x30);
-		if (!(scu_ctrl & 1))
-			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
-	}
-#endif
-
 	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
 	/* already enabled? */
 	if (scu_ctrl & 1)
diff --git a/trunk/arch/arm/kernel/vmlinux.lds.S b/trunk/arch/arm/kernel/vmlinux.lds.S
index 4e66f62b8d41..bf977f8514f6 100644
--- a/trunk/arch/arm/kernel/vmlinux.lds.S
+++ b/trunk/arch/arm/kernel/vmlinux.lds.S
@@ -23,10 +23,8 @@
 
 #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
 #define ARM_EXIT_KEEP(x)	x
-#define ARM_EXIT_DISCARD(x)
 #else
 #define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x)	x
 #endif
 
 OUTPUT_ARCH(arm)
@@ -41,11 +39,6 @@ jiffies = jiffies_64 + 4;
 SECTIONS
 {
 	/*
-	 * XXX: The linker does not define how output sections are
-	 * assigned to input sections when there are multiple statements
-	 * matching the same input section name. There is no documented
-	 * order of matching.
-	 *
 	 * unwind exit sections must be discarded before the rest of the
 	 * unwind sections get included.
 	 */
@@ -54,9 +47,6 @@ SECTIONS
 		*(.ARM.extab.exit.text)
 		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
 		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
-		ARM_EXIT_DISCARD(EXIT_TEXT)
-		ARM_EXIT_DISCARD(EXIT_DATA)
-		EXIT_CALL
 #ifndef CONFIG_HOTPLUG
 		*(.ARM.exidx.devexit.text)
 		*(.ARM.extab.devexit.text)
@@ -68,8 +58,6 @@ SECTIONS
 #ifndef CONFIG_SMP_ON_UP
 		*(.alt.smp.init)
 #endif
-		*(.discard)
-		*(.discard.*)
 	}
 
 #ifdef CONFIG_XIP_KERNEL
@@ -291,6 +279,9 @@ SECTIONS
 
 	STABS_DEBUG
 	.comment 0 : { *(.comment) }
+
+	/* Default discards */
+	DISCARDS
 }
 
 /*
diff --git a/trunk/arch/arm/mm/cache-v7.S b/trunk/arch/arm/mm/cache-v7.S
index 07c4bc8ea0a4..3b24bfa3b828 100644
--- a/trunk/arch/arm/mm/cache-v7.S
+++ b/trunk/arch/arm/mm/cache-v7.S
@@ -174,10 +174,6 @@ ENTRY(v7_coherent_user_range)
 	dcache_line_size r2, r3
 	sub r3, r2, #1
 	bic r12, r0, r3
-#ifdef CONFIG_ARM_ERRATA_764369
-	ALT_SMP(W(dsb))
-	ALT_UP(W(nop))
-#endif
 1:
 USER(	mcr p15, 0, r12, c7, c11, 1 )	@ clean D line to the point of unification
 	add r12, r12, r2
@@ -227,10 +223,6 @@ ENTRY(v7_flush_kern_dcache_area)
 	add r1, r0, r1
 	sub r3, r2, #1
 	bic r0, r0, r3
-#ifdef CONFIG_ARM_ERRATA_764369
-	ALT_SMP(W(dsb))
-	ALT_UP(W(nop))
-#endif
 1:
 	mcr p15, 0, r0, c7, c14, 1	@ clean & invalidate D line / unified line
 	add r0, r0, r2
@@ -255,10 +247,6 @@ v7_dma_inv_range:
 	sub r3, r2, #1
 	tst r0, r3
 	bic r0, r0, r3
-#ifdef CONFIG_ARM_ERRATA_764369
-	ALT_SMP(W(dsb))
-	ALT_UP(W(nop))
-#endif
 	mcrne p15, 0, r0, c7, c14, 1	@ clean & invalidate D / U line
 
 	tst r1, r3
@@ -282,10 +270,6 @@ v7_dma_clean_range:
 	dcache_line_size r2, r3
 	sub r3, r2, #1
 	bic r0, r0, r3
-#ifdef CONFIG_ARM_ERRATA_764369
-	ALT_SMP(W(dsb))
-	ALT_UP(W(nop))
-#endif
 1:
 	mcr p15, 0, r0, c7, c10, 1	@ clean D / U line
 	add r0, r0, r2
@@ -304,10 +288,6 @@ ENTRY(v7_dma_flush_range)
 	dcache_line_size r2, r3
 	sub r3, r2, #1
 	bic r0, r0, r3
-#ifdef CONFIG_ARM_ERRATA_764369
-	ALT_SMP(W(dsb))
-	ALT_UP(W(nop))
-#endif
 1:
 	mcr p15, 0, r0, c7, c14, 1	@ clean & invalidate D / U line
 	add r0, r0, r2
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index c3ff82f92d9c..0a0a1e7c20d2 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -324,8 +324,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
-	else
-		__dma_free_buffer(page, size);
 
 	return addr;
 }
diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c
index 1c5b69373a00..8e8da7960dbe 100644
--- a/trunk/arch/x86/kvm/mmu.c
+++ b/trunk/arch/x86/kvm/mmu.c
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
 
 	/* xchg acts as a barrier before the setting of the high bits */
 	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
-	orig.spte_high = ssptep->spte_high = sspte.spte_high;
+	orig.spte_high = ssptep->spte_high;
+	ssptep->spte_high = sspte.spte_high;
 	count_spte_clear(sptep, spte);
 
 	return orig.spte;
diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c
index a70d2a5d8c7b..9de3ecfd20f9 100644
--- a/trunk/kernel/ptrace.c
+++ b/trunk/kernel/ptrace.c
@@ -744,17 +744,20 @@ int ptrace_request(struct task_struct *child, long request,
 			break;
 
 		si = child->last_siginfo;
-		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
-			child->jobctl |= JOBCTL_LISTENING;
-			/*
-			 * If NOTIFY is set, it means event happened between
-			 * start of this trap and now. Trigger re-trap.
-			 */
-			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-				signal_wake_up(child, true);
-			ret = 0;
-		}
+		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
+			break;
+
+		child->jobctl |= JOBCTL_LISTENING;
+
+		/*
+		 * If NOTIFY is set, it means event happened between start
+		 * of this trap and now. Trigger re-trap immediately.
+		 */
+		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+			signal_wake_up(child, true);
+		unlock_task_sighand(child, &flags);
+		ret = 0;
 		break;
 
 	case PTRACE_DETACH:	 /* detach a process that was attached. */