diff --git a/[refs] b/[refs] index 07f655a9aa1f..89bb9caac6cd 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: bc01637a80f5b670bd70a0279d3f93fa8de1c96d +refs/heads/master: 1fefb8fdc6562057a0e4e4542f3d4323981c9686 diff --git a/trunk/arch/arm/Kconfig.debug b/trunk/arch/arm/Kconfig.debug index e968a52e4881..f15f82bf3a50 100644 --- a/trunk/arch/arm/Kconfig.debug +++ b/trunk/arch/arm/Kconfig.debug @@ -356,15 +356,15 @@ choice is nothing connected to read from the DCC. config DEBUG_SEMIHOSTING - bool "Kernel low-level debug output via semihosting I/O" + bool "Kernel low-level debug output via semihosting I/O" help Semihosting enables code running on an ARM target to use the I/O facilities on a host debugger/emulator through a - simple SVC call. The host debugger or emulator must have + simple SVC call. The host debugger or emulator must have semihosting enabled for the special svc call to be trapped otherwise the kernel will crash. - This is known to work with OpenOCD, as well as + This is known to work with OpenOCD, as well as ARM's Fast Models, or any other controlling environment that implements semihosting. diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index a051dfbdd7db..30eae87ead6d 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux zinstall uinstall install: vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: scripts +%.dtb: $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ -dtbs: scripts +dtbs: $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ # We use MRPROPER_FILES and CLEAN_FILES now diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index 81769c1341fa..b8c64b80bafc 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -659,14 +659,10 @@ __armv7_mmu_cache_on: #ifdef CONFIG_CPU_ENDIAN_BE8 orr r0, r0, #1 << 25 @ big-endian page tables #endif - mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client - bic r6, r6, #1 << 31 @ 32-bit translation system - bic r6, r6, #3 << 0 @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control - mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif mcr p15, 0, r0, c7, c5, 4 @ ISB mcr p15, 0, r0, c1, c0, 0 @ load control register diff --git a/trunk/arch/arm/include/asm/assembler.h b/trunk/arch/arm/include/asm/assembler.h index 5c8b3bf4d825..03fb93621d0d 100644 --- a/trunk/arch/arm/include/asm/assembler.h +++ b/trunk/arch/arm/include/asm/assembler.h @@ -320,12 +320,4 @@ .size \name , . 
- \name .endm - .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req -#ifndef CONFIG_CPU_USE_DOMAINS - adds \tmp, \addr, #\size - 1 - sbcccs \tmp, \tmp, \limit - bcs \bad -#endif - .endm - #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h index 5f6ddcc56452..e965f1b560f1 100644 --- a/trunk/arch/arm/include/asm/memory.h +++ b/trunk/arch/arm/include/asm/memory.h @@ -187,7 +187,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif #endif -#endif /* __ASSEMBLY__ */ #ifndef PHYS_OFFSET #ifdef PLAT_PHYS_OFFSET @@ -197,8 +196,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) #endif #endif -#ifndef __ASSEMBLY__ - /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. diff --git a/trunk/arch/arm/include/asm/tlb.h b/trunk/arch/arm/include/asm/tlb.h index 99a19512ee26..314d4664eae7 100644 --- a/trunk/arch/arm/include/asm/tlb.h +++ b/trunk/arch/arm/include/asm/tlb.h @@ -199,9 +199,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { pgtable_page_dtor(pte); -#ifdef CONFIG_ARM_LPAE - tlb_add_flush(tlb, addr); -#else /* * With the classic ARM MMU, a pte page has two corresponding pmd * entries, each covering 1MB. @@ -209,7 +206,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, addr &= PMD_MASK; tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); tlb_add_flush(tlb, addr + SZ_1M); -#endif tlb_remove_page(tlb, pte); } diff --git a/trunk/arch/arm/include/asm/uaccess.h b/trunk/arch/arm/include/asm/uaccess.h index 77bd79f2ffdb..479a6352e0b5 100644 --- a/trunk/arch/arm/include/asm/uaccess.h +++ b/trunk/arch/arm/include/asm/uaccess.h @@ -101,39 +101,28 @@ extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -#define __GUP_CLOBBER_1 "lr", "cc" -#ifdef CONFIG_CPU_USE_DOMAINS -#define __GUP_CLOBBER_2 "ip", "lr", "cc" -#else -#define __GUP_CLOBBER_2 "lr", "cc" -#endif -#define __GUP_CLOBBER_4 "lr", "cc" - -#define __get_user_x(__r2,__p,__e,__l,__s) \ +#define __get_user_x(__r2,__p,__e,__s,__i...) 
\ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ - __asmeq("%3", "r1") \ "bl __get_user_" #__s \ : "=&r" (__e), "=r" (__r2) \ - : "0" (__p), "r" (__l) \ - : __GUP_CLOBBER_##__s) + : "0" (__p) \ + : __i, "cc") -#define __get_user_check(x,p) \ +#define get_user(x,p) \ ({ \ - unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ register unsigned long __r2 asm("r2"); \ - register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, __l, 1); \ - break; \ + __get_user_x(__r2, __p, __e, 1, "lr"); \ + break; \ case 2: \ - __get_user_x(__r2, __p, __e, __l, 2); \ + __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, __l, 4); \ + __get_user_x(__r2, __p, __e, 4, "lr"); \ break; \ default: __e = __get_user_bad(); break; \ } \ @@ -141,57 +130,42 @@ extern int __get_user_4(void *); __e; \ }) -#define get_user(x,p) \ - ({ \ - might_fault(); \ - __get_user_check(x,p); \ - }) - extern int __put_user_1(void *, unsigned int); extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__l,__s) \ +#define __put_user_x(__r2,__p,__e,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ - __asmeq("%3", "r1") \ "bl __put_user_" #__s \ : "=&r" (__e) \ - : "0" (__p), "r" (__r2), "r" (__l) \ + : "0" (__p), "r" (__r2) \ : "ip", "lr", "cc") -#define __put_user_check(x,p) \ +#define put_user(x,p) \ ({ \ - unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ - register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r2, __p, __e, __l, 1); \ + __put_user_x(__r2, __p, __e, 1); \ break; \ case 2: \ - __put_user_x(__r2, __p, __e, __l, 2); \ + __put_user_x(__r2, __p, __e, 2); \ break; \ case 4: \ - __put_user_x(__r2, __p, __e, __l, 4); \ + __put_user_x(__r2, __p, __e, 4); \ break; \ case 8: \ - __put_user_x(__r2, __p, __e, __l, 8); \ + __put_user_x(__r2, __p, __e, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ __e; \ }) -#define put_user(x,p) \ - ({ \ - might_fault(); \ - __put_user_check(x,p); \ - }) - #else /* CONFIG_MMU */ /* @@ -245,7 +219,6 @@ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ - might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ @@ -327,7 +300,6 @@ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ - might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c index 281bf3301241..ba386bd94107 100644 --- a/trunk/arch/arm/kernel/hw_breakpoint.c +++ b/trunk/arch/arm/kernel/hw_breakpoint.c @@ -159,12 +159,6 @@ static int debug_arch_supported(void) arch >= ARM_DEBUG_ARCH_V7_1; } -/* Can we determine the watchpoint access type from the fsr? 
*/ -static int debug_exception_updates_fsr(void) -{ - return 0; -} - /* Determine number of WRP registers available. */ static int get_num_wrp_resources(void) { @@ -610,14 +604,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) /* Aligned */ break; case 1: + /* Allow single byte watchpoint. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; case 2: /* Allow halfword watchpoints and breakpoints. */ if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - case 3: - /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) - break; default: ret = -EINVAL; goto out; @@ -626,35 +619,18 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) info->address &= ~alignment_mask; info->ctrl.len <<= offset; - if (!bp->overflow_handler) { - /* - * Mismatch breakpoints are required for single-stepping - * breakpoints. - */ - if (!core_has_mismatch_brps()) - return -EINVAL; - - /* We don't allow mismatch breakpoints in kernel space. */ - if (arch_check_bp_in_kernelspace(bp)) - return -EPERM; - - /* - * Per-cpu breakpoints are not supported by our stepping - * mechanism. - */ - if (!bp->hw.bp_target) - return -EINVAL; - - /* - * We only support specific access types if the fsr - * reports them. - */ - if (!debug_exception_updates_fsr() && - (info->ctrl.type == ARM_BREAKPOINT_LOAD || - info->ctrl.type == ARM_BREAKPOINT_STORE)) - return -EINVAL; + /* + * Currently we rely on an overflow handler to take + * care of single-stepping the breakpoint when it fires. + * In the case of userspace breakpoints on a core with V7 debug, + * we can use the mismatch feature as a poor-man's hardware + * single-step, but this only works for per-task breakpoints. + */ + if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || + !core_has_mismatch_brps() || !bp->hw.bp_target)) { + pr_warning("overflow handler required but none found\n"); + ret = -EINVAL; } - out: return ret; } @@ -730,12 +706,10 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, goto unlock; /* Check that the access type matches. */ - if (debug_exception_updates_fsr()) { - access = (fsr & ARM_FSR_ACCESS_MASK) ? - HW_BREAKPOINT_W : HW_BREAKPOINT_R; - if (!(access & hw_breakpoint_type(wp))) - goto unlock; - } + access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : + HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + goto unlock; /* We have a winner. 
*/ info->trigger = addr; diff --git a/trunk/arch/arm/kernel/traps.c b/trunk/arch/arm/kernel/traps.c index b0179b89a04c..f7945218b8c6 100644 --- a/trunk/arch/arm/kernel/traps.c +++ b/trunk/arch/arm/kernel/traps.c @@ -420,23 +420,20 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) #endif instr = *(u32 *) pc; } else if (thumb_mode(regs)) { - if (get_user(instr, (u16 __user *)pc)) - goto die_sig; + get_user(instr, (u16 __user *)pc); if (is_wide_instruction(instr)) { unsigned int instr2; - if (get_user(instr2, (u16 __user *)pc+1)) - goto die_sig; + get_user(instr2, (u16 __user *)pc+1); instr <<= 16; instr |= instr2; } - } else if (get_user(instr, (u32 __user *)pc)) { - goto die_sig; + } else { + get_user(instr, (u32 __user *)pc); } if (call_undef_hook(regs, instr) == 0) return; -die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", diff --git a/trunk/arch/arm/lib/delay.c b/trunk/arch/arm/lib/delay.c index 395d5fbb8fa2..d6dacc69254e 100644 --- a/trunk/arch/arm/lib/delay.c +++ b/trunk/arch/arm/lib/delay.c @@ -59,7 +59,6 @@ void __init init_current_timer_delay(unsigned long freq) { pr_info("Switching to timer-based delay loop\n"); lpj_fine = freq / HZ; - loops_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; diff --git a/trunk/arch/arm/lib/getuser.S b/trunk/arch/arm/lib/getuser.S index 9b06bb41fca6..11093a7c3e32 100644 --- a/trunk/arch/arm/lib/getuser.S +++ b/trunk/arch/arm/lib/getuser.S @@ -16,9 +16,8 @@ * __get_user_X * * Inputs: r0 contains the address - * r1 contains the address limit, which must be preserved * Outputs: r0 is the error code - * r2 contains the zero-extended value + * r2, r3 contains the zero-extended value * lr corrupted * * No other registers must be altered. (see @@ -28,39 +27,33 @@ * Note also that it is intended that __get_user_bad is not global. */ #include -#include #include #include ENTRY(__get_user_1) - check_uaccess r0, 1, r1, r2, __get_user_bad 1: TUSER(ldrb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__get_user_1) ENTRY(__get_user_2) - check_uaccess r0, 2, r1, r2, __get_user_bad -#ifdef CONFIG_CPU_USE_DOMAINS -rb .req ip -2: ldrbt r2, [r0], #1 -3: ldrbt rb, [r0], #0 +#ifdef CONFIG_THUMB2_KERNEL +2: TUSER(ldrb) r2, [r0] +3: TUSER(ldrb) r3, [r0, #1] #else -rb .req r0 -2: ldrb r2, [r0] -3: ldrb rb, [r0, #1] +2: TUSER(ldrb) r2, [r0], #1 +3: TUSER(ldrb) r3, [r0] #endif #ifndef __ARMEB__ - orr r2, r2, rb, lsl #8 + orr r2, r2, r3, lsl #8 #else - orr r2, rb, r2, lsl #8 + orr r2, r3, r2, lsl #8 #endif mov r0, #0 mov pc, lr ENDPROC(__get_user_2) ENTRY(__get_user_4) - check_uaccess r0, 4, r1, r2, __get_user_bad 4: TUSER(ldr) r2, [r0] mov r0, #0 mov pc, lr diff --git a/trunk/arch/arm/lib/putuser.S b/trunk/arch/arm/lib/putuser.S index 3d73dcb959b0..7db25990c589 100644 --- a/trunk/arch/arm/lib/putuser.S +++ b/trunk/arch/arm/lib/putuser.S @@ -16,7 +16,6 @@ * __put_user_X * * Inputs: r0 contains the address - * r1 contains the address limit, which must be preserved * r2, r3 contains the value * Outputs: r0 is the error code * lr corrupted @@ -28,19 +27,16 @@ * Note also that it is intended that __put_user_bad is not global. 
*/ #include -#include #include #include ENTRY(__put_user_1) - check_uaccess r0, 1, r1, ip, __put_user_bad 1: TUSER(strb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_1) ENTRY(__put_user_2) - check_uaccess r0, 2, r1, ip, __put_user_bad mov ip, r2, lsr #8 #ifdef CONFIG_THUMB2_KERNEL #ifndef __ARMEB__ @@ -64,14 +60,12 @@ ENTRY(__put_user_2) ENDPROC(__put_user_2) ENTRY(__put_user_4) - check_uaccess r0, 4, r1, ip, __put_user_bad 4: TUSER(str) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_4) ENTRY(__put_user_8) - check_uaccess r0, 8, r1, ip, __put_user_bad #ifdef CONFIG_THUMB2_KERNEL 5: TUSER(str) r2, [r0] 6: TUSER(str) r3, [r0, #4] diff --git a/trunk/arch/arm/mm/context.c b/trunk/arch/arm/mm/context.c index 4e07eec1270d..119bc52ab93e 100644 --- a/trunk/arch/arm/mm/context.c +++ b/trunk/arch/arm/mm/context.c @@ -63,11 +63,10 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, pid = task_pid_nr(thread->task) << ASID_BITS; asm volatile( " mrc p15, 0, %0, c13, c0, 1\n" - " and %0, %0, %2\n" - " orr %0, %0, %1\n" - " mcr p15, 0, %0, c13, c0, 1\n" + " bfi %1, %0, #0, %2\n" + " mcr p15, 0, %1, c13, c0, 1\n" : "=r" (contextidr), "+r" (pid) - : "I" (~ASID_MASK)); + : "I" (ASID_BITS)); isb(); return NOTIFY_OK; diff --git a/trunk/arch/arm/mm/mm.h b/trunk/arch/arm/mm/mm.h index a8ee92da3544..6776160618ef 100644 --- a/trunk/arch/arm/mm/mm.h +++ b/trunk/arch/arm/mm/mm.h @@ -55,9 +55,6 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page /* permanent static mappings from iotable_init() */ #define VM_ARM_STATIC_MAPPING 0x40000000 -/* empty mapping */ -#define VM_ARM_EMPTY_MAPPING 0x20000000 - /* mapping type (attributes) for permanent static mappings */ #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20) diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index c2fa21d0103e..4c2d0451e84a 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr) vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); vm->addr = (void *)addr; vm->size = SECTION_SIZE; - vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; + vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; vm->caller = pmd_empty_section_gap; vm_area_add_early(vm); } @@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void) /* we're still single threaded hence no lock needed here */ for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) + if (!(vm->flags & VM_ARM_STATIC_MAPPING)) continue; addr = (unsigned long)vm->addr; if (addr < next) @@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void) * Check whether this memory bank would partially overlap * the vmalloc area. 
*/ - if (__va(bank->start + bank->size - 1) >= vmalloc_min || - __va(bank->start + bank->size - 1) <= __va(bank->start)) { + if (__va(bank->start + bank->size) > vmalloc_min || + __va(bank->start + bank->size) < __va(bank->start)) { unsigned long newsize = vmalloc_min - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " "to -%.8llx (vmalloc region overlap).\n", diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig index c7092e6057c5..f34861920634 100644 --- a/trunk/arch/blackfin/Kconfig +++ b/trunk/arch/blackfin/Kconfig @@ -38,7 +38,6 @@ config BLACKFIN select GENERIC_ATOMIC64 select GENERIC_IRQ_PROBE select IRQ_PER_CPU if SMP - select USE_GENERIC_SMP_HELPERS if SMP select HAVE_NMI_WATCHDOG if NMI_WATCHDOG select GENERIC_SMP_IDLE_THREAD select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS diff --git a/trunk/arch/blackfin/Makefile b/trunk/arch/blackfin/Makefile index 66cf00095b84..d3d7e64ca96d 100644 --- a/trunk/arch/blackfin/Makefile +++ b/trunk/arch/blackfin/Makefile @@ -20,6 +20,7 @@ endif KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) KBUILD_CFLAGS_MODULE += -mlong-calls LDFLAGS += -m elf32bfin +KALLSYMS += --symbol-prefix=_ KBUILD_DEFCONFIG := BF537-STAMP_defconfig diff --git a/trunk/arch/blackfin/include/asm/smp.h b/trunk/arch/blackfin/include/asm/smp.h index 9631598dcc5d..dc3d144b4bb5 100644 --- a/trunk/arch/blackfin/include/asm/smp.h +++ b/trunk/arch/blackfin/include/asm/smp.h @@ -18,8 +18,6 @@ #define raw_smp_processor_id() blackfin_core_id() extern void bfin_relocate_coreb_l1_mem(void); -extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); diff --git a/trunk/arch/blackfin/mach-common/smp.c b/trunk/arch/blackfin/mach-common/smp.c index a40151306b77..00bbe672b3b3 100644 --- a/trunk/arch/blackfin/mach-common/smp.c +++ b/trunk/arch/blackfin/mach-common/smp.c @@ -48,13 +48,10 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS]; struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; -enum ipi_message_type { - BFIN_IPI_TIMER, - BFIN_IPI_RESCHEDULE, - BFIN_IPI_CALL_FUNC, - BFIN_IPI_CALL_FUNC_SINGLE, - BFIN_IPI_CPU_STOP, -}; +#define BFIN_IPI_TIMER 0 +#define BFIN_IPI_RESCHEDULE 1 +#define BFIN_IPI_CALL_FUNC 2 +#define BFIN_IPI_CPU_STOP 3 struct blackfin_flush_data { unsigned long start; @@ -63,20 +60,35 @@ struct blackfin_flush_data { void *secondary_stack; + +struct smp_call_struct { + void (*func)(void *info); + void *info; + int wait; + cpumask_t *waitmask; +}; + static struct blackfin_flush_data smp_flush_data; static DEFINE_SPINLOCK(stop_lock); +struct ipi_message { + unsigned long type; + struct smp_call_struct call_struct; +}; + /* A magic number - stress test shows this is safe for common cases */ #define BFIN_IPI_MSGQ_LEN 5 /* Simple FIFO buffer, overflow leads to panic */ -struct ipi_data { +struct ipi_message_queue { + spinlock_t lock; unsigned long count; - unsigned long bits; + unsigned long head; /* head of the queue */ + struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN]; }; -static DEFINE_PER_CPU(struct ipi_data, bfin_ipi); +static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); static void ipi_cpu_stop(unsigned int cpu) { @@ -117,6 +129,28 @@ static void ipi_flush_icache(void *info) blackfin_icache_flush_range(fdata->start, fdata->end); } +static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) +{ + 
int wait; + void (*func)(void *info); + void *info; + func = msg->call_struct.func; + info = msg->call_struct.info; + wait = msg->call_struct.wait; + func(info); + if (wait) { +#ifdef __ARCH_SYNC_CORE_DCACHE + /* + * 'wait' usually means synchronization between CPUs. + * Invalidate D cache in case shared data was changed + * by func() to ensure cache coherence. + */ + resync_core_dcache(); +#endif + cpumask_clear_cpu(cpu, msg->call_struct.waitmask); + } +} + /* Use IRQ_SUPPLE_0 to request reschedule. * When returning from interrupt to user space, * there is chance to reschedule */ @@ -138,95 +172,152 @@ void ipi_timer(void) static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) { - struct ipi_data *bfin_ipi_data; + struct ipi_message *msg; + struct ipi_message_queue *msg_queue; unsigned int cpu = smp_processor_id(); - unsigned long pending; - unsigned long msg; + unsigned long flags; platform_clear_ipi(cpu, IRQ_SUPPLE_1); - bfin_ipi_data = &__get_cpu_var(bfin_ipi); - - while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { - msg = 0; - do { - msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); - switch (msg) { - case BFIN_IPI_TIMER: - ipi_timer(); - break; - case BFIN_IPI_RESCHEDULE: - scheduler_ipi(); - break; - case BFIN_IPI_CALL_FUNC: - generic_smp_call_function_interrupt(); - break; - - case BFIN_IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - - case BFIN_IPI_CPU_STOP: - ipi_cpu_stop(cpu); - break; - } - } while (msg < BITS_PER_LONG); - - smp_mb(); + msg_queue = &__get_cpu_var(ipi_msg_queue); + + spin_lock_irqsave(&msg_queue->lock, flags); + + while (msg_queue->count) { + msg = &msg_queue->ipi_message[msg_queue->head]; + switch (msg->type) { + case BFIN_IPI_TIMER: + ipi_timer(); + break; + case BFIN_IPI_RESCHEDULE: + scheduler_ipi(); + break; + case BFIN_IPI_CALL_FUNC: + ipi_call_function(cpu, msg); + break; + case BFIN_IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + default: + printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", + cpu, msg->type); + break; + } + msg_queue->head++; + msg_queue->head %= BFIN_IPI_MSGQ_LEN; + msg_queue->count--; } + spin_unlock_irqrestore(&msg_queue->lock, flags); return IRQ_HANDLED; } -static void bfin_ipi_init(void) +static void ipi_queue_init(void) { unsigned int cpu; - struct ipi_data *bfin_ipi_data; + struct ipi_message_queue *msg_queue; for_each_possible_cpu(cpu) { - bfin_ipi_data = &per_cpu(bfin_ipi, cpu); - bfin_ipi_data->bits = 0; - bfin_ipi_data->count = 0; + msg_queue = &per_cpu(ipi_msg_queue, cpu); + spin_lock_init(&msg_queue->lock); + msg_queue->count = 0; + msg_queue->head = 0; } } -void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) +static inline void smp_send_message(cpumask_t callmap, unsigned long type, + void (*func) (void *info), void *info, int wait) { unsigned int cpu; - struct ipi_data *bfin_ipi_data; - unsigned long flags; - - local_irq_save(flags); - - for_each_cpu(cpu, cpumask) { - bfin_ipi_data = &per_cpu(bfin_ipi, cpu); - smp_mb(); - set_bit(msg, &bfin_ipi_data->bits); - bfin_ipi_data->count++; + struct ipi_message_queue *msg_queue; + struct ipi_message *msg; + unsigned long flags, next_msg; + cpumask_t waitmask; /* waitmask is shared by all cpus */ + + cpumask_copy(&waitmask, &callmap); + for_each_cpu(cpu, &callmap) { + msg_queue = &per_cpu(ipi_msg_queue, cpu); + spin_lock_irqsave(&msg_queue->lock, flags); + if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { + next_msg = (msg_queue->head + msg_queue->count) + % BFIN_IPI_MSGQ_LEN; + msg = 
&msg_queue->ipi_message[next_msg]; + msg->type = type; + if (type == BFIN_IPI_CALL_FUNC) { + msg->call_struct.func = func; + msg->call_struct.info = info; + msg->call_struct.wait = wait; + msg->call_struct.waitmask = &waitmask; + } + msg_queue->count++; + } else + panic("IPI message queue overflow\n"); + spin_unlock_irqrestore(&msg_queue->lock, flags); platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } - local_irq_restore(flags); + if (wait) { + while (!cpumask_empty(&waitmask)) + blackfin_dcache_invalidate_range( + (unsigned long)(&waitmask), + (unsigned long)(&waitmask)); +#ifdef __ARCH_SYNC_CORE_DCACHE + /* + * Invalidate D cache in case shared data was changed by + * other processors to ensure cache coherence. + */ + resync_core_dcache(); +#endif + } } -void arch_send_call_function_single_ipi(int cpu) +int smp_call_function(void (*func)(void *info), void *info, int wait) { - send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE); + cpumask_t callmap; + + preempt_disable(); + cpumask_copy(&callmap, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &callmap); + if (!cpumask_empty(&callmap)) + smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); + + preempt_enable(); + + return 0; } +EXPORT_SYMBOL_GPL(smp_call_function); -void arch_send_call_function_ipi_mask(const struct cpumask *mask) +int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, + int wait) { - send_ipi(mask, BFIN_IPI_CALL_FUNC); + unsigned int cpu = cpuid; + cpumask_t callmap; + + if (cpu_is_offline(cpu)) + return 0; + cpumask_clear(&callmap); + cpumask_set_cpu(cpu, &callmap); + + smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); + + return 0; } +EXPORT_SYMBOL_GPL(smp_call_function_single); void smp_send_reschedule(int cpu) { - send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE); + cpumask_t callmap; + /* simply trigger an ipi */ + + cpumask_clear(&callmap); + cpumask_set_cpu(cpu, &callmap); + + smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); return; } void smp_send_msg(const struct cpumask *mask, unsigned long type) { - send_ipi(mask, type); + smp_send_message(*mask, type, NULL, NULL, 0); } void smp_timer_broadcast(const struct cpumask *mask) @@ -242,7 +333,7 @@ void smp_send_stop(void) cpumask_copy(&callmap, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &callmap); if (!cpumask_empty(&callmap)) - send_ipi(&callmap, BFIN_IPI_CPU_STOP); + smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); preempt_enable(); @@ -345,7 +436,7 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { platform_prepare_cpus(max_cpus); - bfin_ipi_init(); + ipi_queue_init(); platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); } diff --git a/trunk/arch/x86/kvm/i8259.c b/trunk/arch/x86/kvm/i8259.c index 9fc9aa7ac703..e498b18f010c 100644 --- a/trunk/arch/x86/kvm/i8259.c +++ b/trunk/arch/x86/kvm/i8259.c @@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) if (val & 0x10) { u8 edge_irr = s->irr & ~s->elcr; int i; - bool found = false; + bool found; struct kvm_vcpu *vcpu; s->init4 = val & 1; diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index b1eb202ee76a..c00f03de1b79 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -3619,7 +3619,6 @@ static void seg_setup(int seg) static int alloc_apic_access_page(struct kvm *kvm) { - struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3634,13 
+3633,7 @@ static int alloc_apic_access_page(struct kvm *kvm) if (r) goto out; - page = gfn_to_page(kvm, 0xfee00); - if (is_error_page(page)) { - r = -EFAULT; - goto out; - } - - kvm->arch.apic_access_page = page; + kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); out: mutex_unlock(&kvm->slots_lock); return r; @@ -3648,7 +3641,6 @@ static int alloc_apic_access_page(struct kvm *kvm) static int alloc_identity_pagetable(struct kvm *kvm) { - struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3664,13 +3656,8 @@ static int alloc_identity_pagetable(struct kvm *kvm) if (r) goto out; - page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); - if (is_error_page(page)) { - r = -EFAULT; - goto out; - } - - kvm->arch.ept_identity_pagetable = page; + kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, + kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); out: mutex_unlock(&kvm->slots_lock); return r; @@ -6588,7 +6575,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && - best && (best->ebx & bit(X86_FEATURE_INVPCID)) && + best && (best->ecx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, @@ -6598,7 +6585,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); if (best) - best->ebx &= ~bit(X86_FEATURE_INVPCID); + best->ecx &= ~bit(X86_FEATURE_INVPCID); } } diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 2966c847d489..148ed666e311 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -5113,20 +5113,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) +static void vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) - return 0; + return; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; vcpu->arch.apic->vapic_page = page; - return 0; } static void vapic_exit(struct kvm_vcpu *vcpu) @@ -5433,11 +5430,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } + vapic_enter(vcpu); r = 1; while (r > 0) { diff --git a/trunk/crypto/authenc.c b/trunk/crypto/authenc.c index d0583a4489e6..5ef7ba6b6a76 100644 --- a/trunk/crypto/authenc.c +++ b/trunk/crypto/authenc.c @@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (req->assoclen && sg_is_last(assoc)) { + if (sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); @@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (req->assoclen && sg_is_last(assoc)) { + if (sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 50d5dea0ff59..c3f52eb97fca 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -268,6 +268,9 @@ static const struct pci_device_id 
ahci_pci_tbl[] = { /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, + /* JMicron 362B and 362C have an AHCI function with IDE class code */ + { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, + { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr }, /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ diff --git a/trunk/drivers/crypto/caam/key_gen.c b/trunk/drivers/crypto/caam/key_gen.c index d216cd3cc569..002888185f17 100644 --- a/trunk/drivers/crypto/caam/key_gen.c +++ b/trunk/drivers/crypto/caam/key_gen.c @@ -120,4 +120,3 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, return ret; } -EXPORT_SYMBOL(gen_split_key); diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c index ed38454228c6..dc27598785e5 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4066,6 +4066,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); + spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); @@ -5391,8 +5392,6 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); - spin_lock_init(&poll_aen_lock); - support_poll_for_event = 2; support_device_change = 1; diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c index b25757d1e91b..9d46fcbe7755 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2424,13 +2424,10 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } /* command line tunables for max controller queue depth */ - if (max_queue_depth != -1 && max_queue_depth != 0) { - max_request_credit = min_t(u16, max_queue_depth + - ioc->hi_priority_depth + ioc->internal_depth, - facts->RequestCredit); - if (max_request_credit > MAX_HBA_QUEUE_DEPTH) - max_request_credit = MAX_HBA_QUEUE_DEPTH; - } else + if (max_queue_depth != -1) + max_request_credit = (max_queue_depth < facts->RequestCredit) + ? max_queue_depth : facts->RequestCredit; + else max_request_credit = min_t(u16, facts->RequestCredit, MAX_HBA_QUEUE_DEPTH); @@ -2505,7 +2502,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth; + ioc->shost->can_queue = ioc->scsiio_depth - (2); dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: " "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index de2337f255a7..4a6381c87253 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -42,8 +42,6 @@ #include -static void scsi_eh_done(struct scsi_cmnd *scmd); - #define SENSE_TIMEOUT (10*HZ) /* @@ -243,14 +241,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! 
scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ - if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) - /* - * nasty: for mid-layer issued TURs, we need to return the - * actual sense data without any recovery attempt. For eh - * issued ones, we need to try to recover and interpret - */ - return SUCCESS; - if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index faa790fba134..ffd77739ae3e 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -776,6 +776,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ + req->errors = result; if (result) { if (sense_valid && req->sense) { /* @@ -791,10 +792,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } - /* - * __scsi_error_from_host_byte may have reset the host_byte - */ - req->errors = cmd->result; req->resid_len = scsi_get_resid(cmd); diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index d947ffc20ceb..56a93794c470 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -764,16 +764,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); - if (strncmp(sdev->vendor, "ATA ", 8) == 0) { - /* - * sata emulation layer device. This is a hack to work around - * the SATL power management specifications which state that - * when the SATL detects the device has gone into standby - * mode, it shall respond with NOT READY. 
- */ - sdev->allow_restart = 1; - } - if (*bflags & BLIST_ISROM) { sdev->type = TYPE_ROM; sdev->removable = 1; diff --git a/trunk/fs/fuse/control.c b/trunk/fs/fuse/control.c index 75a20c092dd4..03ff5b1eba93 100644 --- a/trunk/fs/fuse/control.c +++ b/trunk/fs/fuse/control.c @@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, @@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, diff --git a/trunk/fs/fuse/cuse.c b/trunk/fs/fuse/cuse.c index ee8d55042298..3426521f3205 100644 --- a/trunk/fs/fuse/cuse.c +++ b/trunk/fs/fuse/cuse.c @@ -396,7 +396,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) err_region: unregister_chrdev_region(devt, 1); err: - fuse_conn_kill(fc); + fc->conn_error = 1; goto out; } @@ -532,6 +532,8 @@ static int cuse_channel_release(struct inode *inode, struct file *file) cdev_del(cc->cdev); } + /* kill connection and shutdown channel */ + fuse_conn_kill(&cc->fc); rc = fuse_dev_release(inode, file); /* puts the base reference */ return rc; diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c index f4246cfc8d87..7df2b5e8fbe1 100644 --- a/trunk/fs/fuse/dev.c +++ b/trunk/fs/fuse/dev.c @@ -1576,7 +1576,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, req->pages[req->num_pages] = page; req->num_pages++; - offset = 0; num -= this_num; total_len += this_num; index++; diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index fca222dabe3c..ce0a2838ccd0 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -367,6 +367,11 @@ void fuse_conn_kill(struct fuse_conn *fc) wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); + mutex_lock(&fuse_mutex); + list_del(&fc->entry); + fuse_ctl_remove_conn(fc); + mutex_unlock(&fuse_mutex); + fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); @@ -375,14 +380,7 @@ static void fuse_put_super(struct super_block *sb) struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); - fuse_conn_kill(fc); - mutex_lock(&fuse_mutex); - list_del(&fc->entry); - fuse_ctl_remove_conn(fc); - mutex_unlock(&fuse_mutex); - fuse_bdi_destroy(fc); - fuse_conn_put(fc); } diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c index 6a7fcab7ecb3..75d6d0a3d32e 100644 --- a/trunk/fs/nfs/file.c +++ b/trunk/fs/nfs/file.c @@ -287,12 +287,10 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_path.dentry->d_inode; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret != 0) - goto out; mutex_lock(&inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); mutex_unlock(&inode->i_mutex); -out: + return ret; } diff --git a/trunk/fs/nfs/inode.c b/trunk/fs/nfs/inode.c index 9b47610338f5..c6e895f0fbf3 100644 --- a/trunk/fs/nfs/inode.c +++ b/trunk/fs/nfs/inode.c @@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode) nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = jiffies; - memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf)); + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); 
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; else diff --git a/trunk/fs/nfs/nfs3proc.c b/trunk/fs/nfs/nfs3proc.c index 69322096c325..d6b3b5f2d779 100644 --- a/trunk/fs/nfs/nfs3proc.c +++ b/trunk/fs/nfs/nfs3proc.c @@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct inode *dir = dentry->d_inode; - __be32 *verf = NFS_I(dir)->cookieverf; + __be32 *verf = NFS_COOKIEVERF(dir); struct nfs3_readdirargs arg = { .fh = NFS_FH(dir), .cookie = cookie, diff --git a/trunk/fs/nfs/nfs4file.c b/trunk/fs/nfs/nfs4file.c index eb5eb8eef4d3..acb65e7887f8 100644 --- a/trunk/fs/nfs/nfs4file.c +++ b/trunk/fs/nfs/nfs4file.c @@ -96,15 +96,13 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_path.dentry->d_inode; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret != 0) - goto out; mutex_lock(&inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); if (!ret && !datasync) /* application has asked for meta-data sync */ ret = pnfs_layoutcommit_inode(inode, true); mutex_unlock(&inode->i_mutex); -out: + return ret; } diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index 1e50326d00dd..635274140b18 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); - nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); + nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); res.pgbase = args.pgbase; status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); if (status >= 0) { - memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); + memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; } @@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server) && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); } -/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that - * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on +/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that + * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on * the stack. 
*/ -#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) +#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) static int buf_to_pages_noslab(const void *buf, size_t buflen, struct page **pages, unsigned int *pgbase) @@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen, spages = pages; do { - len = min_t(size_t, PAGE_SIZE, buflen); + len = min_t(size_t, PAGE_CACHE_SIZE, buflen); newpage = alloc_page(GFP_KERNEL); if (newpage == NULL) @@ -3739,7 +3739,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size struct nfs4_cached_acl *acl; size_t buflen = sizeof(*acl) + acl_len; - if (buflen <= PAGE_SIZE) { + if (pages && buflen <= PAGE_SIZE) { acl = kmalloc(buflen, GFP_KERNEL); if (acl == NULL) goto out; @@ -3782,15 +3782,17 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu .rpc_argp = &args, .rpc_resp = &res, }; - unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); - int ret = -ENOMEM, i; + int ret = -ENOMEM, npages, i; + size_t acl_len = 0; + npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* As long as we're doing a round trip to the server anyway, * let's be prepared for a page of acl data. */ if (npages == 0) npages = 1; - if (npages > ARRAY_SIZE(pages)) - return -ERANGE; + + /* Add an extra page to handle the bitmap returned */ + npages++; for (i = 0; i < npages; i++) { pages[i] = alloc_page(GFP_KERNEL); @@ -3806,6 +3808,11 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu args.acl_len = npages * PAGE_SIZE; args.acl_pgbase = 0; + /* Let decode_getfacl know not to fail if the ACL data is larger than + * the page we send as a guess */ + if (buf == NULL) + res.acl_flags |= NFS4_ACL_LEN_REQUEST; + dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", __func__, buf, buflen, npages, args.acl_len); ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), @@ -3813,19 +3820,20 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu if (ret) goto out_free; - /* Handle the case where the passed-in buffer is too short */ - if (res.acl_flags & NFS4_ACL_TRUNC) { - /* Did the user only issue a request for the acl length? 
*/ - if (buf == NULL) - goto out_ok; + acl_len = res.acl_len; + if (acl_len > args.acl_len) + nfs4_write_cached_acl(inode, NULL, 0, acl_len); + else + nfs4_write_cached_acl(inode, pages, res.acl_data_offset, + acl_len); + if (buf) { ret = -ERANGE; - goto out_free; + if (acl_len > buflen) + goto out_free; + _copy_from_pages(buf, pages, res.acl_data_offset, + acl_len); } - nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); - if (buf) - _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); -out_ok: - ret = res.acl_len; + ret = acl_len; out_free: for (i = 0; i < npages; i++) if (pages[i]) @@ -3883,13 +3891,10 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl .rpc_argp = &arg, .rpc_resp = &res, }; - unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); int ret, i; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; - if (npages > ARRAY_SIZE(pages)) - return -ERANGE; i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); if (i < 0) return i; diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c index 8dba6bd48557..1bfbd67c556d 100644 --- a/trunk/fs/nfs/nfs4xdr.c +++ b/trunk/fs/nfs/nfs4xdr.c @@ -5072,14 +5072,18 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, * are stored with the acl data to handle the problem of * variable length bitmaps.*/ res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset; - res->acl_len = attrlen; - /* Check for receive buffer overflow */ - if (res->acl_len > (xdr->nwords << 2) || - res->acl_len + res->acl_data_offset > xdr->buf->page_len) { - res->acl_flags |= NFS4_ACL_TRUNC; + /* We ignore &savep and don't do consistency checks on + * the attr length. Let userspace figure it out.... */ + res->acl_len = attrlen; + if (attrlen > (xdr->nwords << 2)) { + if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { + /* getxattr interface called with a NULL buf */ + goto out; + } dprintk("NFS: acl reply: attrlen %u > page_len %u\n", attrlen, xdr->nwords << 2); + return -EINVAL; } } else status = -EOPNOTSUPP; @@ -6225,8 +6229,7 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, status = decode_open(xdr, res); if (status) goto out; - status = decode_getfh(xdr, &res->fh); - if (status) + if (decode_getfh(xdr, &res->fh) != 0) goto out; decode_getfattr(xdr, res->f_attr, res->server); out: diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index b8eda700584b..239aff7338eb 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -1867,7 +1867,6 @@ static int nfs23_validate_mount_data(void *options, memcpy(sap, &data->addr, sizeof(data->addr)); args->nfs_server.addrlen = sizeof(data->addr); - args->nfs_server.port = ntohs(data->addr.sin_port); if (!nfs_verify_server_address(sap)) goto out_no_address; @@ -2565,7 +2564,6 @@ static int nfs4_validate_mount_data(void *options, return -EFAULT; if (!nfs_verify_server_address(sap)) goto out_no_address; - args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); if (data->auth_flavourlen) { if (data->auth_flavourlen > 1) diff --git a/trunk/include/linux/nfs_fs.h b/trunk/include/linux/nfs_fs.h index 4b03f56e280e..1f8fc7f9bcd8 100644 --- a/trunk/include/linux/nfs_fs.h +++ b/trunk/include/linux/nfs_fs.h @@ -265,6 +265,11 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) return NFS_SERVER(inode)->nfs_client->rpc_ops; } +static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) +{ + return NFS_I(inode)->cookieverf; +} + static inline unsigned 
NFS_MINATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index be9cf3c7e79e..ac7c8ae254f2 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -652,7 +652,7 @@ struct nfs_getaclargs { }; /* getxattr ACL interface flags */ -#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */ +#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */ struct nfs_getaclres { size_t acl_len; size_t acl_data_offset; diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h index bf8c49ff7530..cff40aa7db62 100644 --- a/trunk/include/linux/sunrpc/xprt.h +++ b/trunk/include/linux/sunrpc/xprt.h @@ -114,7 +114,6 @@ struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); - void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_task *task); @@ -282,8 +281,6 @@ void xprt_connect(struct rpc_task *task); void xprt_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); -void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); -void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_prepare_transmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 1e1373bcb3e3..692d97628a10 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -66,7 +66,6 @@ enum { /* pool flags */ POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ - POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ @@ -653,7 +652,7 @@ static bool need_to_manage_workers(struct worker_pool *pool) /* Do we have too many workers and should some go away? */ static bool too_many_workers(struct worker_pool *pool) { - bool managing = pool->flags & POOL_MANAGING_WORKERS; + bool managing = mutex_is_locked(&pool->manager_mutex); int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ int nr_busy = pool->nr_workers - nr_idle; @@ -1327,15 +1326,6 @@ static void idle_worker_rebind(struct worker *worker) /* we did our part, wait for rebind_workers() to finish up */ wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); - - /* - * rebind_workers() shouldn't finish until all workers passed the - * above WORKER_REBIND wait. Tell it when done. 
- */ - spin_lock_irq(&worker->pool->gcwq->lock); - if (!--worker->idle_rebind->cnt) - complete(&worker->idle_rebind->done); - spin_unlock_irq(&worker->pool->gcwq->lock); } /* @@ -1406,15 +1396,12 @@ static void rebind_workers(struct global_cwq *gcwq) /* set REBIND and kick idle ones, we'll wait for these later */ for_each_worker_pool(pool, gcwq) { list_for_each_entry(worker, &pool->idle_list, entry) { - unsigned long worker_flags = worker->flags; - if (worker->flags & WORKER_REBIND) continue; - /* morph UNBOUND to REBIND atomically */ - worker_flags &= ~WORKER_UNBOUND; - worker_flags |= WORKER_REBIND; - ACCESS_ONCE(worker->flags) = worker_flags; + /* morph UNBOUND to REBIND */ + worker->flags &= ~WORKER_UNBOUND; + worker->flags |= WORKER_REBIND; idle_rebind.cnt++; worker->idle_rebind = &idle_rebind; @@ -1432,15 +1419,25 @@ static void rebind_workers(struct global_cwq *gcwq) goto retry; } - /* all idle workers are rebound, rebind busy workers */ + /* + * All idle workers are rebound and waiting for %WORKER_REBIND to + * be cleared inside idle_worker_rebind(). Clear and release. + * Clearing %WORKER_REBIND from this foreign context is safe + * because these workers are still guaranteed to be idle. + */ + for_each_worker_pool(pool, gcwq) + list_for_each_entry(worker, &pool->idle_list, entry) + worker->flags &= ~WORKER_REBIND; + + wake_up_all(&gcwq->rebind_hold); + + /* rebind busy workers */ for_each_busy_worker(worker, i, pos, gcwq) { struct work_struct *rebind_work = &worker->rebind_work; - unsigned long worker_flags = worker->flags; - /* morph UNBOUND to REBIND atomically */ - worker_flags &= ~WORKER_UNBOUND; - worker_flags |= WORKER_REBIND; - ACCESS_ONCE(worker->flags) = worker_flags; + /* morph UNBOUND to REBIND */ + worker->flags &= ~WORKER_UNBOUND; + worker->flags |= WORKER_REBIND; if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(rebind_work))) @@ -1452,34 +1449,6 @@ static void rebind_workers(struct global_cwq *gcwq) worker->scheduled.next, work_color_to_flags(WORK_NO_COLOR)); } - - /* - * All idle workers are rebound and waiting for %WORKER_REBIND to - * be cleared inside idle_worker_rebind(). Clear and release. - * Clearing %WORKER_REBIND from this foreign context is safe - * because these workers are still guaranteed to be idle. - * - * We need to make sure all idle workers passed WORKER_REBIND wait - * in idle_worker_rebind() before returning; otherwise, workers can - * get stuck at the wait if hotplug cycle repeats. - */ - idle_rebind.cnt = 1; - INIT_COMPLETION(idle_rebind.done); - - for_each_worker_pool(pool, gcwq) { - list_for_each_entry(worker, &pool->idle_list, entry) { - worker->flags &= ~WORKER_REBIND; - idle_rebind.cnt++; - } - } - - wake_up_all(&gcwq->rebind_hold); - - if (--idle_rebind.cnt) { - spin_unlock_irq(&gcwq->lock); - wait_for_completion(&idle_rebind.done); - spin_lock_irq(&gcwq->lock); - } } static struct worker *alloc_worker(void) @@ -1825,45 +1794,9 @@ static bool manage_workers(struct worker *worker) struct worker_pool *pool = worker->pool; bool ret = false; - if (pool->flags & POOL_MANAGING_WORKERS) + if (!mutex_trylock(&pool->manager_mutex)) return ret; - pool->flags |= POOL_MANAGING_WORKERS; - - /* - * To simplify both worker management and CPU hotplug, hold off - * management while hotplug is in progress. 
CPU hotplug path can't - * grab %POOL_MANAGING_WORKERS to achieve this because that can - * lead to idle worker depletion (all become busy thinking someone - * else is managing) which in turn can result in deadlock under - * extreme circumstances. Use @pool->manager_mutex to synchronize - * manager against CPU hotplug. - * - * manager_mutex would always be free unless CPU hotplug is in - * progress. trylock first without dropping @gcwq->lock. - */ - if (unlikely(!mutex_trylock(&pool->manager_mutex))) { - spin_unlock_irq(&pool->gcwq->lock); - mutex_lock(&pool->manager_mutex); - /* - * CPU hotplug could have happened while we were waiting - * for manager_mutex. Hotplug itself can't handle us - * because manager isn't either on idle or busy list, and - * @gcwq's state and ours could have deviated. - * - * As hotplug is now excluded via manager_mutex, we can - * simply try to bind. It will succeed or fail depending - * on @gcwq's current state. Try it and adjust - * %WORKER_UNBOUND accordingly. - */ - if (worker_maybe_bind_and_lock(worker)) - worker->flags &= ~WORKER_UNBOUND; - else - worker->flags |= WORKER_UNBOUND; - - ret = true; - } - pool->flags &= ~POOL_MANAGE_WORKERS; /* @@ -1873,7 +1806,6 @@ static bool manage_workers(struct worker *worker) ret |= maybe_destroy_workers(pool); ret |= maybe_create_worker(pool); - pool->flags &= ~POOL_MANAGING_WORKERS; mutex_unlock(&pool->manager_mutex); return ret; } diff --git a/trunk/lib/digsig.c b/trunk/lib/digsig.c index 8c0e62975c88..286d558033e2 100644 --- a/trunk/lib/digsig.c +++ b/trunk/lib/digsig.c @@ -163,11 +163,9 @@ static int digsig_verify_rsa(struct key *key, memcpy(out1 + head, p, l); err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); - if (err) - goto err; - if (len != hlen || memcmp(out2, h, hlen)) - err = -EINVAL; + if (!err && len == hlen) + err = memcmp(out2, h, hlen); err: mpi_free(in); diff --git a/trunk/net/sunrpc/xprt.c b/trunk/net/sunrpc/xprt.c index 5d7f61d7559c..a5a402a7d21f 100644 --- a/trunk/net/sunrpc/xprt.c +++ b/trunk/net/sunrpc/xprt.c @@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) return false; } -void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) +static void xprt_alloc_slot(struct rpc_task *task) { + struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; - spin_lock(&xprt->reserve_lock); if (!list_empty(&xprt->free)) { req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); list_del(&req->rq_list); @@ -994,29 +994,12 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) default: task->tk_status = -EAGAIN; } - spin_unlock(&xprt->reserve_lock); return; out_init_req: task->tk_status = 0; task->tk_rqstp = req; xprt_request_init(task, xprt); - spin_unlock(&xprt->reserve_lock); -} -EXPORT_SYMBOL_GPL(xprt_alloc_slot); - -void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) -{ - /* Note: grabbing the xprt_lock_write() ensures that we throttle - * new slot allocation if the transport is congested (i.e. when - * reconnecting a stream transport or when out of socket write - * buffer space). 
- */ - if (xprt_lock_write(xprt, task)) { - xprt_alloc_slot(xprt, task); - xprt_release_write(xprt, task); - } } -EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot); static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) { @@ -1100,9 +1083,20 @@ void xprt_reserve(struct rpc_task *task) if (task->tk_rqstp != NULL) return; + /* Note: grabbing the xprt_lock_write() here is not strictly needed, + * but ensures that we throttle new slot allocation if the transport + * is congested (e.g. if reconnecting or if we're out of socket + * write buffer space). + */ task->tk_timeout = 0; task->tk_status = -EAGAIN; - xprt->ops->alloc_slot(xprt, task); + if (!xprt_lock_write(xprt, task)) + return; + + spin_lock(&xprt->reserve_lock); + xprt_alloc_slot(task); + spin_unlock(&xprt->reserve_lock); + xprt_release_write(xprt, task); } static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) diff --git a/trunk/net/sunrpc/xprtrdma/transport.c b/trunk/net/sunrpc/xprtrdma/transport.c index 5d9202dc7cb1..06cdbff79e4a 100644 --- a/trunk/net/sunrpc/xprtrdma/transport.c +++ b/trunk/net/sunrpc/xprtrdma/transport.c @@ -713,7 +713,6 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) static struct rpc_xprt_ops xprt_rdma_procs = { .reserve_xprt = xprt_rdma_reserve_xprt, .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ - .alloc_slot = xprt_alloc_slot, .release_request = xprt_release_rqst_cong, /* ditto */ .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ diff --git a/trunk/net/sunrpc/xprtsock.c b/trunk/net/sunrpc/xprtsock.c index a35b8e52e551..400567243f84 100644 --- a/trunk/net/sunrpc/xprtsock.c +++ b/trunk/net/sunrpc/xprtsock.c @@ -2473,7 +2473,6 @@ static void bc_destroy(struct rpc_xprt *xprt) static struct rpc_xprt_ops xs_local_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, - .alloc_slot = xprt_alloc_slot, .rpcbind = xs_local_rpcbind, .set_port = xs_local_set_port, .connect = xs_connect, @@ -2490,7 +2489,6 @@ static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, .release_xprt = xprt_release_xprt_cong, - .alloc_slot = xprt_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, @@ -2508,7 +2506,6 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, - .alloc_slot = xprt_lock_and_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, diff --git a/trunk/scripts/link-vmlinux.sh b/trunk/scripts/link-vmlinux.sh index b3d907eb93a9..4235a6361fec 100644 --- a/trunk/scripts/link-vmlinux.sh +++ b/trunk/scripts/link-vmlinux.sh @@ -74,13 +74,8 @@ kallsyms() info KSYM ${2} local kallsymopt; - if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then - kallsymopt="${kallsymopt} \ - --symbol-prefix=${CONFIG_SYMBOL_PREFIX}" - fi - if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then - kallsymopt="${kallsymopt} --all-symbols" + kallsymopt=--all-symbols fi local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \