diff --git a/[refs] b/[refs] index 51731a55a2cb..5277bba365ce 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 4be0ed42c56c84d980a851f0bdd0fc20b45c3944 +refs/heads/master: f39aa30d7741f40ad964341e9243dbbd7f8ff057 diff --git a/trunk/Documentation/ABI/testing/sysfs-class-scsi_host b/trunk/Documentation/ABI/testing/sysfs-class-scsi_host deleted file mode 100644 index 29a4f892e433..000000000000 --- a/trunk/Documentation/ABI/testing/sysfs-class-scsi_host +++ /dev/null @@ -1,13 +0,0 @@ -What: /sys/class/scsi_host/hostX/isci_id -Date: June 2011 -Contact: Dave Jiang -Description: - This file contains the enumerated host ID for the Intel - SCU controller. The Intel(R) C600 Series Chipset SATA/SAS - Storage Control Unit embeds up to two 4-port controllers in - a single PCI device. The controllers are enumerated in order - which usually means the lowest number scsi_host corresponds - with the first controller, but this association is not - guaranteed. The 'isci_id' attribute unambiguously identifies - the controller index: '0' for the first controller, - '1' for the second. diff --git a/trunk/Documentation/cgroups/memory.txt b/trunk/Documentation/cgroups/memory.txt index 06eb6d957c83..6f3c598971fc 100644 --- a/trunk/Documentation/cgroups/memory.txt +++ b/trunk/Documentation/cgroups/memory.txt @@ -380,7 +380,7 @@ will be charged as a new owner of it. 5.2 stat file -memory.stat file includes following statistics +5.2.1 memory.stat file includes following statistics # per-memory cgroup local status cache - # of bytes of page cache memory. @@ -438,6 +438,89 @@ Note: file_mapped is accounted only when the memory cgroup is owner of page cache.) +5.2.2 memory.vmscan_stat + +memory.vmscan_stat includes statistics information for memory scanning and +freeing, reclaiming. The statistics shows memory scanning information since +memory cgroup creation and can be reset to 0 by writing 0 as + + #echo 0 > ../memory.vmscan_stat + +This file contains following statistics. + +[param]_[file_or_anon]_pages_by_[reason]_[under_heararchy] +[param]_elapsed_ns_by_[reason]_[under_hierarchy] + +For example, + + scanned_file_pages_by_limit indicates the number of scanned + file pages at vmscan. + +Now, 3 parameters are supported + + scanned - the number of pages scanned by vmscan + rotated - the number of pages activated at vmscan + freed - the number of pages freed by vmscan + +If "rotated" is high against scanned/freed, the memcg seems busy. + +Now, 2 reason are supported + + limit - the memory cgroup's limit + system - global memory pressure + softlimit + (global memory pressure not under softlimit is not handled now) + +When under_hierarchy is added in the tail, the number indicates the +total memcg scan of its children and itself. + +elapsed_ns is a elapsed time in nanosecond. This may include sleep time +and not indicates CPU usage. So, please take this as just showing +latency. + +Here is an example. 
+ +# cat /cgroup/memory/A/memory.vmscan_stat +scanned_pages_by_limit 9471864 +scanned_anon_pages_by_limit 6640629 +scanned_file_pages_by_limit 2831235 +rotated_pages_by_limit 4243974 +rotated_anon_pages_by_limit 3971968 +rotated_file_pages_by_limit 272006 +freed_pages_by_limit 2318492 +freed_anon_pages_by_limit 962052 +freed_file_pages_by_limit 1356440 +elapsed_ns_by_limit 351386416101 +scanned_pages_by_system 0 +scanned_anon_pages_by_system 0 +scanned_file_pages_by_system 0 +rotated_pages_by_system 0 +rotated_anon_pages_by_system 0 +rotated_file_pages_by_system 0 +freed_pages_by_system 0 +freed_anon_pages_by_system 0 +freed_file_pages_by_system 0 +elapsed_ns_by_system 0 +scanned_pages_by_limit_under_hierarchy 9471864 +scanned_anon_pages_by_limit_under_hierarchy 6640629 +scanned_file_pages_by_limit_under_hierarchy 2831235 +rotated_pages_by_limit_under_hierarchy 4243974 +rotated_anon_pages_by_limit_under_hierarchy 3971968 +rotated_file_pages_by_limit_under_hierarchy 272006 +freed_pages_by_limit_under_hierarchy 2318492 +freed_anon_pages_by_limit_under_hierarchy 962052 +freed_file_pages_by_limit_under_hierarchy 1356440 +elapsed_ns_by_limit_under_hierarchy 351386416101 +scanned_pages_by_system_under_hierarchy 0 +scanned_anon_pages_by_system_under_hierarchy 0 +scanned_file_pages_by_system_under_hierarchy 0 +rotated_pages_by_system_under_hierarchy 0 +rotated_anon_pages_by_system_under_hierarchy 0 +rotated_file_pages_by_system_under_hierarchy 0 +freed_pages_by_system_under_hierarchy 0 +freed_anon_pages_by_system_under_hierarchy 0 +freed_file_pages_by_system_under_hierarchy 0 +elapsed_ns_by_system_under_hierarchy 0 + 5.3 swappiness Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 854ed5ca7e3f..614d0382e2cb 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -2086,12 +2086,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Override pmtimer IOPort with a hex value. e.g. pmtmr=0x508 - pnp.debug=1 [PNP] - Enable PNP debug messages (depends on the - CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time - via /sys/module/pnp/parameters/debug. We always show - current resource usage; turning this on also shows - possible settings and some assignment information. + pnp.debug [PNP] + Enable PNP debug messages. This depends on the + CONFIG_PNP_DEBUG_MESSAGES option. pnpacpi= [ACPI] { off } diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 0acf9ab3d287..28f65c249b97 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3262,17 +3262,6 @@ F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ -INTEL C600 SERIES SAS CONTROLLER DRIVER -M: Intel SCU Linux support -M: Dan Williams -M: Dave Jiang -M: Ed Nadolski -L: linux-scsi@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git -S: Maintained -F: drivers/scsi/isci/ -F: firmware/isci/ - INTEL IDLE DRIVER M: Len Brown L: linux-pm@lists.linux-foundation.org @@ -4785,7 +4774,7 @@ F: drivers/net/wireless/orinoco/ OSD LIBRARY and FILESYSTEM M: Boaz Harrosh -M: Benny Halevy +M: Benny Halevy L: osd-dev@open-osd.org W: http://open-osd.org T: git git://git.open-osd.org/open-osd.git @@ -7211,9 +7200,6 @@ W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices S: Supported F: Documentation/hwmon/wm83?? 
F: drivers/leds/leds-wm83*.c -F: drivers/input/misc/wm831x-on.c -F: drivers/input/touchscreen/wm831x-ts.c -F: drivers/input/touchscreen/wm97*.c F: drivers/mfd/wm8*.c F: drivers/power/wm83*.c F: drivers/rtc/rtc-wm83*.c @@ -7223,7 +7209,6 @@ F: drivers/watchdog/wm83*_wdt.c F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* -F: include/linux/wm97xx.h F: include/sound/wm????.h F: sound/soc/codecs/wm* diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index 8bb936226dee..60cde53d266c 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -51,7 +51,7 @@ config GENERIC_CMOS_UPDATE def_bool y config GENERIC_GPIO - bool + def_bool y config ZONE_DMA bool diff --git a/trunk/arch/um/Kconfig.x86 b/trunk/arch/um/Kconfig.x86 index 21bebe63df66..d31ecf346b4e 100644 --- a/trunk/arch/um/Kconfig.x86 +++ b/trunk/arch/um/Kconfig.x86 @@ -10,10 +10,6 @@ config CMPXCHG_LOCAL bool default n -config CMPXCHG_DOUBLE - bool - default n - source "arch/x86/Kconfig.cpu" endmenu diff --git a/trunk/arch/um/Makefile b/trunk/arch/um/Makefile index c0f712cc7c5f..fab8121d2b32 100644 --- a/trunk/arch/um/Makefile +++ b/trunk/arch/um/Makefile @@ -41,7 +41,7 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(ARCH_DIR)/sys-$(SUBARCH) KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \ $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \ -Din6addr_loopback=kernel_in6addr_loopback \ - -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr + -Din6addr_any=kernel_in6addr_any KBUILD_AFLAGS += $(ARCH_INCLUDE) diff --git a/trunk/arch/um/drivers/line.c b/trunk/arch/um/drivers/line.c index 364c8a15c4c3..d51c404239a8 100644 --- a/trunk/arch/um/drivers/line.c +++ b/trunk/arch/um/drivers/line.c @@ -399,8 +399,8 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data) * is done under a spinlock. Checking whether the device is in use is * line->tty->count > 1, also under the spinlock. * - * line->count serves to decide whether the device should be enabled or - * disabled on the host. If it's equal to 0, then we are doing the + * tty->count serves to decide whether the device should be enabled or + * disabled on the host. If it's equal to 1, then we are doing the * first open or last close. Otherwise, open and close just return. 
*/ @@ -414,16 +414,16 @@ int line_open(struct line *lines, struct tty_struct *tty) goto out_unlock; err = 0; - if (line->count++) + if (tty->count > 1) goto out_unlock; - BUG_ON(tty->driver_data); + spin_unlock(&line->count_lock); + tty->driver_data = line; line->tty = tty; - spin_unlock(&line->count_lock); err = enable_chan(line); - if (err) /* line_close() will be called by our caller */ + if (err) return err; INIT_DELAYED_WORK(&line->task, line_timer_cb); @@ -436,7 +436,7 @@ int line_open(struct line *lines, struct tty_struct *tty) chan_window_size(&line->chan_list, &tty->winsize.ws_row, &tty->winsize.ws_col); - return 0; + return err; out_unlock: spin_unlock(&line->count_lock); @@ -460,16 +460,17 @@ void line_close(struct tty_struct *tty, struct file * filp) flush_buffer(line); spin_lock(&line->count_lock); - BUG_ON(!line->valid); + if (!line->valid) + goto out_unlock; - if (--line->count) + if (tty->count > 1) goto out_unlock; + spin_unlock(&line->count_lock); + line->tty = NULL; tty->driver_data = NULL; - spin_unlock(&line->count_lock); - if (line->sigio) { unregister_winch(tty); line->sigio = 0; @@ -497,7 +498,7 @@ static int setup_one_line(struct line *lines, int n, char *init, int init_prio, spin_lock(&line->count_lock); - if (line->count) { + if (line->tty != NULL) { *error_out = "Device is already open"; goto out; } @@ -721,53 +722,41 @@ struct winch { int pid; struct tty_struct *tty; unsigned long stack; - struct work_struct work; }; -static void __free_winch(struct work_struct *work) +static void free_winch(struct winch *winch, int free_irq_ok) { - struct winch *winch = container_of(work, struct winch, work); - free_irq(WINCH_IRQ, winch); + if (free_irq_ok) + free_irq(WINCH_IRQ, winch); + + list_del(&winch->list); if (winch->pid != -1) os_kill_process(winch->pid, 1); + if (winch->fd != -1) + os_close_file(winch->fd); if (winch->stack != 0) free_stack(winch->stack, 0); kfree(winch); } -static void free_winch(struct winch *winch) -{ - int fd = winch->fd; - winch->fd = -1; - if (fd != -1) - os_close_file(fd); - list_del(&winch->list); - __free_winch(&winch->work); -} - static irqreturn_t winch_interrupt(int irq, void *data) { struct winch *winch = data; struct tty_struct *tty; struct line *line; - int fd = winch->fd; int err; char c; - if (fd != -1) { - err = generic_read(fd, &c, NULL); + if (winch->fd != -1) { + err = generic_read(winch->fd, &c, NULL); if (err < 0) { if (err != -EAGAIN) { - winch->fd = -1; - list_del(&winch->list); - os_close_file(fd); printk(KERN_ERR "winch_interrupt : " "read failed, errno = %d\n", -err); printk(KERN_ERR "fd %d is losing SIGWINCH " "support\n", winch->tty_fd); - INIT_WORK(&winch->work, __free_winch); - schedule_work(&winch->work); + free_winch(winch, 0); return IRQ_HANDLED; } goto out; @@ -839,7 +828,7 @@ static void unregister_winch(struct tty_struct *tty) list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); if (winch->tty == tty) { - free_winch(winch); + free_winch(winch, 1); break; } } @@ -855,7 +844,7 @@ static void winch_cleanup(void) list_for_each_safe(ele, next, &winch_handlers) { winch = list_entry(ele, struct winch, list); - free_winch(winch); + free_winch(winch, 1); } spin_unlock(&winch_handler_lock); diff --git a/trunk/arch/um/drivers/xterm.c b/trunk/arch/um/drivers/xterm.c index 2e1de5728604..8ac7146c237f 100644 --- a/trunk/arch/um/drivers/xterm.c +++ b/trunk/arch/um/drivers/xterm.c @@ -123,7 +123,6 @@ static int xterm_open(int input, int output, int primary, void *d, err = -errno; 
printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", errno); - close(fd); return err; } close(fd); diff --git a/trunk/arch/um/include/asm/ptrace-generic.h b/trunk/arch/um/include/asm/ptrace-generic.h index 1a7d2757fe05..ae084ad1a3a0 100644 --- a/trunk/arch/um/include/asm/ptrace-generic.h +++ b/trunk/arch/um/include/asm/ptrace-generic.h @@ -42,6 +42,10 @@ extern long subarch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern unsigned long getreg(struct task_struct *child, int regno); extern int putreg(struct task_struct *child, int regno, unsigned long value); +extern int get_fpregs(struct user_i387_struct __user *buf, + struct task_struct *child); +extern int set_fpregs(struct user_i387_struct __user *buf, + struct task_struct *child); extern int arch_copy_tls(struct task_struct *new); extern void clear_flushed_tls(struct task_struct *task); diff --git a/trunk/arch/um/include/shared/line.h b/trunk/arch/um/include/shared/line.h index 63df3ca02ac2..72f4f25af247 100644 --- a/trunk/arch/um/include/shared/line.h +++ b/trunk/arch/um/include/shared/line.h @@ -33,7 +33,6 @@ struct line_driver { struct line { struct tty_struct *tty; spinlock_t count_lock; - unsigned long count; int valid; char *init_str; diff --git a/trunk/arch/um/include/shared/registers.h b/trunk/arch/um/include/shared/registers.h index f1e0aa56c52a..b0b4589e0ebc 100644 --- a/trunk/arch/um/include/shared/registers.h +++ b/trunk/arch/um/include/shared/registers.h @@ -16,7 +16,7 @@ extern int restore_fpx_registers(int pid, unsigned long *fp_regs); extern int save_registers(int pid, struct uml_pt_regs *regs); extern int restore_registers(int pid, struct uml_pt_regs *regs); extern int init_registers(int pid); -extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); +extern void get_safe_registers(unsigned long *regs); extern unsigned long get_thread_reg(int reg, jmp_buf *buf); extern int get_fp_registers(int pid, unsigned long *regs); extern int put_fp_registers(int pid, unsigned long *regs); diff --git a/trunk/arch/um/kernel/process.c b/trunk/arch/um/kernel/process.c index 21c1ae7c3d75..fab4371184f6 100644 --- a/trunk/arch/um/kernel/process.c +++ b/trunk/arch/um/kernel/process.c @@ -202,7 +202,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, arch_copy_thread(¤t->thread.arch, &p->thread.arch); } else { - get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); + get_safe_registers(p->thread.regs.regs.gp); p->thread.request.u.thread = current->thread.request.u.thread; handler = new_thread_handler; } diff --git a/trunk/arch/um/kernel/ptrace.c b/trunk/arch/um/kernel/ptrace.c index c9da32b0c707..701b672c1122 100644 --- a/trunk/arch/um/kernel/ptrace.c +++ b/trunk/arch/um/kernel/ptrace.c @@ -50,11 +50,23 @@ long arch_ptrace(struct task_struct *child, long request, void __user *vp = p; switch (request) { + /* read word at location addr. */ + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: + ret = generic_ptrace_peekdata(child, addr, data); + break; + /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: ret = peek_user(child, addr, data); break; + /* write the word at location addr. 
*/ + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: ret = poke_user(child, addr, data); @@ -94,6 +106,16 @@ long arch_ptrace(struct task_struct *child, long request, ret = 0; break; } +#endif +#ifdef PTRACE_GETFPREGS + case PTRACE_GETFPREGS: /* Get the child FPU state. */ + ret = get_fpregs(vp, child); + break; +#endif +#ifdef PTRACE_SETFPREGS + case PTRACE_SETFPREGS: /* Set the child FPU state. */ + ret = set_fpregs(vp, child); + break; #endif case PTRACE_GET_THREAD_AREA: ret = ptrace_get_thread_area(child, addr, vp); @@ -131,6 +153,12 @@ long arch_ptrace(struct task_struct *child, long request, ret = -EIO; break; } +#endif +#ifdef PTRACE_ARCH_PRCTL + case PTRACE_ARCH_PRCTL: + /* XXX Calls ptrace on the host - needs some SMP thinking */ + ret = arch_prctl(child, data, (void __user *) addr); + break; #endif default: ret = ptrace_request(child, request, addr, data); diff --git a/trunk/arch/um/os-Linux/registers.c b/trunk/arch/um/os-Linux/registers.c index b866b9e3bef9..830fe6a1518a 100644 --- a/trunk/arch/um/os-Linux/registers.c +++ b/trunk/arch/um/os-Linux/registers.c @@ -8,8 +8,6 @@ #include #include #include "sysdep/ptrace.h" -#include "sysdep/ptrace_user.h" -#include "registers.h" int save_registers(int pid, struct uml_pt_regs *regs) { @@ -34,7 +32,6 @@ int restore_registers(int pid, struct uml_pt_regs *regs) /* This is set once at boot time and not changed thereafter */ static unsigned long exec_regs[MAX_REG_NR]; -static unsigned long exec_fp_regs[FP_SIZE]; int init_registers(int pid) { @@ -45,14 +42,10 @@ int init_registers(int pid) return -errno; arch_init_registers(pid); - get_fp_registers(pid, exec_fp_regs); return 0; } -void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) +void get_safe_registers(unsigned long *regs) { memcpy(regs, exec_regs, sizeof(exec_regs)); - - if (fp_regs) - memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); } diff --git a/trunk/arch/um/os-Linux/skas/mem.c b/trunk/arch/um/os-Linux/skas/mem.c index e771398be5f3..d261f170d120 100644 --- a/trunk/arch/um/os-Linux/skas/mem.c +++ b/trunk/arch/um/os-Linux/skas/mem.c @@ -39,7 +39,7 @@ static unsigned long syscall_regs[MAX_REG_NR]; static int __init init_syscall_regs(void) { - get_safe_registers(syscall_regs, NULL); + get_safe_registers(syscall_regs); syscall_regs[REGS_IP_INDEX] = STUB_CODE + ((unsigned long) &batch_syscall_stub - (unsigned long) &__syscall_stub_start); diff --git a/trunk/arch/um/os-Linux/skas/process.c b/trunk/arch/um/os-Linux/skas/process.c index dee0e8cf8ad0..d6e0a2234b86 100644 --- a/trunk/arch/um/os-Linux/skas/process.c +++ b/trunk/arch/um/os-Linux/skas/process.c @@ -373,9 +373,6 @@ void userspace(struct uml_pt_regs *regs) if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) fatal_sigsegv(); - if (put_fp_registers(pid, regs->fp)) - fatal_sigsegv(); - /* Now we set local_using_sysemu to be used for one loop */ local_using_sysemu = get_using_sysemu(); @@ -402,12 +399,6 @@ void userspace(struct uml_pt_regs *regs) fatal_sigsegv(); } - if (get_fp_registers(pid, regs->fp)) { - printk(UM_KERN_ERR "userspace - get_fp_registers failed, " - "errno = %d\n", errno); - fatal_sigsegv(); - } - UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */ if (WIFSTOPPED(status)) { @@ -466,11 +457,10 @@ void userspace(struct uml_pt_regs *regs) } static unsigned long thread_regs[MAX_REG_NR]; -static unsigned long thread_fp_regs[FP_SIZE]; static int __init 
init_thread_regs(void) { - get_safe_registers(thread_regs, thread_fp_regs); + get_safe_registers(thread_regs); /* Set parent's instruction pointer to start of clone-stub */ thread_regs[REGS_IP_INDEX] = STUB_CODE + (unsigned long) stub_clone_handler - @@ -513,13 +503,6 @@ int copy_context_skas0(unsigned long new_stack, int pid) return err; } - err = put_fp_registers(pid, thread_fp_regs); - if (err < 0) { - printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers " - "failed, pid = %d, err = %d\n", pid, err); - return err; - } - /* set a well known return code for detection of child write failure */ child_data->err = 12345678; diff --git a/trunk/arch/um/sys-i386/asm/ptrace.h b/trunk/arch/um/sys-i386/asm/ptrace.h index 5d2a59112537..0273e4d09af7 100644 --- a/trunk/arch/um/sys-i386/asm/ptrace.h +++ b/trunk/arch/um/sys-i386/asm/ptrace.h @@ -42,6 +42,11 @@ */ struct user_desc; +extern int get_fpxregs(struct user_fxsr_struct __user *buf, + struct task_struct *child); +extern int set_fpxregs(struct user_fxsr_struct __user *buf, + struct task_struct *tsk); + extern int ptrace_get_thread_area(struct task_struct *child, int idx, struct user_desc __user *user_desc); diff --git a/trunk/arch/um/sys-i386/ptrace.c b/trunk/arch/um/sys-i386/ptrace.c index 3375c2717851..d23b2d3ea384 100644 --- a/trunk/arch/um/sys-i386/ptrace.c +++ b/trunk/arch/um/sys-i386/ptrace.c @@ -145,7 +145,7 @@ int peek_user(struct task_struct *child, long addr, long data) return put_user(tmp, (unsigned long __user *) data); } -static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; @@ -161,7 +161,7 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c return n; } -static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_i387_struct fpregs; @@ -174,7 +174,7 @@ static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *c (unsigned long *) &fpregs); } -static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; @@ -190,7 +190,7 @@ static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct * return n; } -static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) +int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; struct user_fxsr_struct fpregs; @@ -206,23 +206,5 @@ static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct * long subarch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { - int ret = -EIO; - void __user *datap = (void __user *) data; - switch (request) { - case PTRACE_GETFPREGS: /* Get the child FPU state. */ - ret = get_fpregs(datap, child); - break; - case PTRACE_SETFPREGS: /* Set the child FPU state. */ - ret = set_fpregs(datap, child); - break; - case PTRACE_GETFPXREGS: /* Get the child FPU state. */ - ret = get_fpxregs(datap, child); - break; - case PTRACE_SETFPXREGS: /* Set the child FPU state. 
*/ - ret = set_fpxregs(datap, child); - break; - default: - ret = -EIO; - } - return ret; + return -EIO; } diff --git a/trunk/arch/um/sys-i386/shared/sysdep/ptrace.h b/trunk/arch/um/sys-i386/shared/sysdep/ptrace.h index c398a5076111..d50e62e07070 100644 --- a/trunk/arch/um/sys-i386/shared/sysdep/ptrace.h +++ b/trunk/arch/um/sys-i386/shared/sysdep/ptrace.h @@ -53,7 +53,6 @@ extern int sysemu_supported; struct uml_pt_regs { unsigned long gp[MAX_REG_NR]; - unsigned long fp[HOST_FPX_SIZE]; struct faultinfo faultinfo; long syscall; int is_user; diff --git a/trunk/arch/um/sys-x86_64/ptrace.c b/trunk/arch/um/sys-x86_64/ptrace.c index 4005506834fd..f43613643cdb 100644 --- a/trunk/arch/um/sys-x86_64/ptrace.c +++ b/trunk/arch/um/sys-x86_64/ptrace.c @@ -145,7 +145,7 @@ int is_syscall(unsigned long addr) return instr == 0x050f; } -static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int err, n, cpu = ((struct thread_info *) child->stack)->cpu; long fpregs[HOST_FP_SIZE]; @@ -162,7 +162,7 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c return n; } -static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) +int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) { int n, cpu = ((struct thread_info *) child->stack)->cpu; long fpregs[HOST_FP_SIZE]; @@ -182,16 +182,12 @@ long subarch_ptrace(struct task_struct *child, long request, void __user *datap = (void __user *) data; switch (request) { - case PTRACE_GETFPREGS: /* Get the child FPU state. */ + case PTRACE_GETFPXREGS: /* Get the child FPU state. */ ret = get_fpregs(datap, child); break; - case PTRACE_SETFPREGS: /* Set the child FPU state. */ + case PTRACE_SETFPXREGS: /* Set the child FPU state. */ ret = set_fpregs(datap, child); break; - case PTRACE_ARCH_PRCTL: - /* XXX Calls ptrace on the host - needs some SMP thinking */ - ret = arch_prctl(child, data, (void __user *) addr); - break; } return ret; diff --git a/trunk/arch/um/sys-x86_64/shared/sysdep/ptrace.h b/trunk/arch/um/sys-x86_64/shared/sysdep/ptrace.h index 8ee8f8e12af1..fdba5457947a 100644 --- a/trunk/arch/um/sys-x86_64/shared/sysdep/ptrace.h +++ b/trunk/arch/um/sys-x86_64/shared/sysdep/ptrace.h @@ -85,7 +85,6 @@ struct uml_pt_regs { unsigned long gp[MAX_REG_NR]; - unsigned long fp[HOST_FP_SIZE]; struct faultinfo faultinfo; long syscall; int is_user; diff --git a/trunk/arch/x86/include/asm/alternative-asm.h b/trunk/arch/x86/include/asm/alternative-asm.h index 091508b533b4..4554cc6fb96a 100644 --- a/trunk/arch/x86/include/asm/alternative-asm.h +++ b/trunk/arch/x86/include/asm/alternative-asm.h @@ -16,6 +16,7 @@ #endif .macro altinstruction_entry orig alt feature orig_len alt_len + .align 8 .long \orig - . .long \alt - . 
.word \feature diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 37ad100a2210..23fb6d79f209 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -48,6 +48,9 @@ struct alt_instr { u16 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ u8 replacementlen; /* length of new instruction, <= instrlen */ +#ifdef CONFIG_X86_64 + u32 pad2; +#endif }; extern void alternative_instructions(void); @@ -80,6 +83,7 @@ static inline int alternatives_text_reserved(void *start, void *end) \ "661:\n\t" oldinstr "\n662:\n" \ ".section .altinstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ " .long 661b - .\n" /* label */ \ " .long 663f - .\n" /* new instruction */ \ " .word " __stringify(feature) "\n" /* feature bit */ \ diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h index 88b23a43f340..4258aac99a6e 100644 --- a/trunk/arch/x86/include/asm/cpufeature.h +++ b/trunk/arch/x86/include/asm/cpufeature.h @@ -332,6 +332,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) asm goto("1: jmp %l[t_no]\n" "2:\n" ".section .altinstructions,\"a\"\n" + _ASM_ALIGN "\n" " .long 1b - .\n" " .long 0\n" /* no replacement */ " .word %P0\n" /* feature bit */ @@ -349,6 +350,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) asm volatile("1: movb $0,%0\n" "2:\n" ".section .altinstructions,\"a\"\n" + _ASM_ALIGN "\n" " .long 1b - .\n" " .long 3f - .\n" " .word %P1\n" /* feature bit */ diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index 3dd53f997b11..20a614275064 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1721,8 +1721,10 @@ void __init xen_setup_machphys_mapping(void) machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } #ifdef CONFIG_X86_32 - WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) - < machine_to_phys_mapping); + if ((machine_to_phys_mapping + machine_to_phys_nr) + < machine_to_phys_mapping) + machine_to_phys_nr = (unsigned long *)NULL + - machine_to_phys_mapping; #endif } diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index 46d6d21dbdbe..c3b8d440873c 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -306,12 +306,10 @@ char * __init xen_memory_setup(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); extra_limit = xen_get_max_pages(); - if (max_pfn + extra_pages > extra_limit) { - if (extra_limit > max_pfn) - extra_pages = extra_limit - max_pfn; - else - extra_pages = 0; - } + if (extra_limit >= max_pfn) + extra_pages = extra_limit - max_pfn; + else + extra_pages = 0; extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index 041d4fe9dfe4..d4fc6d454f8d 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -532,6 +532,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) WARN_ON(xen_smp_intr_init(0)); xen_init_lock_cpu(0); + xen_init_spinlocks(); } static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) diff --git a/trunk/arch/x86/xen/time.c b/trunk/arch/x86/xen/time.c index 163b4679556e..5158c505bef9 100644 --- a/trunk/arch/x86/xen/time.c +++ b/trunk/arch/x86/xen/time.c @@ -168,10 +168,9 @@ cycle_t xen_clocksource_read(void) struct pvclock_vcpu_time_info *src; cycle_t ret; - preempt_disable_notrace(); - src = &__get_cpu_var(xen_vcpu)->time; + src = &get_cpu_var(xen_vcpu)->time; ret = 
pvclock_clocksource_read(src); - preempt_enable_notrace(); + put_cpu_var(xen_vcpu); return ret; } diff --git a/trunk/drivers/acpi/acpica/acconfig.h b/trunk/drivers/acpi/acpica/acconfig.h index f895a244ca7e..bc533dde16c4 100644 --- a/trunk/drivers/acpi/acpica/acconfig.h +++ b/trunk/drivers/acpi/acpica/acconfig.h @@ -121,7 +121,7 @@ /* Maximum sleep allowed via Sleep() operator */ -#define ACPI_MAX_SLEEP 2000 /* Two seconds */ +#define ACPI_MAX_SLEEP 20000 /* Two seconds */ /****************************************************************************** * diff --git a/trunk/drivers/acpi/apei/Kconfig b/trunk/drivers/acpi/apei/Kconfig index e3f47872ec22..c34aa51af4ee 100644 --- a/trunk/drivers/acpi/apei/Kconfig +++ b/trunk/drivers/acpi/apei/Kconfig @@ -13,7 +13,6 @@ config ACPI_APEI_GHES bool "APEI Generic Hardware Error Source" depends on ACPI_APEI && X86 select ACPI_HED - select IRQ_WORK select LLIST select GENERIC_ALLOCATOR help diff --git a/trunk/drivers/acpi/apei/apei-base.c b/trunk/drivers/acpi/apei/apei-base.c index 61540360d5ce..8041248fce9b 100644 --- a/trunk/drivers/acpi/apei/apei-base.c +++ b/trunk/drivers/acpi/apei/apei-base.c @@ -618,7 +618,7 @@ int apei_osc_setup(void) }; capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; - capbuf[OSC_SUPPORT_TYPE] = 1; + capbuf[OSC_SUPPORT_TYPE] = 0; capbuf[OSC_CONTROL_TYPE] = 0; if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) diff --git a/trunk/drivers/cpufreq/pcc-cpufreq.c b/trunk/drivers/cpufreq/pcc-cpufreq.c index cdc02ac8f41a..7b0603eb0129 100644 --- a/trunk/drivers/cpufreq/pcc-cpufreq.c +++ b/trunk/drivers/cpufreq/pcc-cpufreq.c @@ -261,9 +261,6 @@ static int pcc_get_offset(int cpu) pr = per_cpu(processors, cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); - if (!pr) - return -ENODEV; - status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); if (ACPI_FAILURE(status)) return -ENODEV; diff --git a/trunk/drivers/firewire/ohci.c b/trunk/drivers/firewire/ohci.c index 57cd3a406edf..fd7170a9ad2c 100644 --- a/trunk/drivers/firewire/ohci.c +++ b/trunk/drivers/firewire/ohci.c @@ -290,6 +290,9 @@ static const struct { {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_NO_MSI}, + {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, diff --git a/trunk/drivers/gpio/gpio-generic.c b/trunk/drivers/gpio/gpio-generic.c index 4e24436b0f82..231714def4d2 100644 --- a/trunk/drivers/gpio/gpio-generic.c +++ b/trunk/drivers/gpio/gpio-generic.c @@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc, return 0; } -int bgpio_remove(struct bgpio_chip *bgc) +int __devexit bgpio_remove(struct bgpio_chip *bgc) { int err = gpiochip_remove(&bgc->gc); @@ -361,10 +361,15 @@ int bgpio_remove(struct bgpio_chip *bgc) } EXPORT_SYMBOL_GPL(bgpio_remove); -int bgpio_init(struct bgpio_chip *bgc, struct device *dev, - unsigned long sz, void __iomem *dat, void __iomem *set, - void __iomem *clr, void __iomem *dirout, void __iomem *dirin, - bool big_endian) +int __devinit bgpio_init(struct bgpio_chip *bgc, + struct device *dev, + unsigned long sz, + void __iomem *dat, + void __iomem *set, + void __iomem *clr, + void __iomem *dirout, + void __iomem *dirin, + bool big_endian) { int ret; diff --git a/trunk/drivers/hid/hid-wacom.c b/trunk/drivers/hid/hid-wacom.c index 72ca689b6474..a597039d0755 100644 --- a/trunk/drivers/hid/hid-wacom.c +++ b/trunk/drivers/hid/hid-wacom.c @@ -373,8 +373,6 @@ static int wacom_probe(struct hid_device *hdev, hidinput = list_entry(hdev->inputs.next, 
struct hid_input, list); input = hidinput->input; - __set_bit(INPUT_PROP_POINTER, input->propbit); - /* Basics */ input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c index 411257676133..59d83e83da7f 100644 --- a/trunk/drivers/hwmon/coretemp.c +++ b/trunk/drivers/hwmon/coretemp.c @@ -601,12 +601,7 @@ static int create_core_data(struct platform_data *pdata, err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); if (!err) { tdata->attr_size += MAX_THRESH_ATTRS; - tdata->tmin = tdata->tjmax - - ((eax & THERM_MASK_THRESHOLD0) >> - THERM_SHIFT_THRESHOLD0) * 1000; - tdata->ttarget = tdata->tjmax - - ((eax & THERM_MASK_THRESHOLD1) >> - THERM_SHIFT_THRESHOLD1) * 1000; + tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; } pdata->core_data[attr_no] = tdata; diff --git a/trunk/drivers/hwmon/pmbus/pmbus_core.c b/trunk/drivers/hwmon/pmbus/pmbus_core.c index 397fc59b5682..a561c3a0e916 100644 --- a/trunk/drivers/hwmon/pmbus/pmbus_core.c +++ b/trunk/drivers/hwmon/pmbus/pmbus_core.c @@ -978,8 +978,6 @@ static void pmbus_find_max_attr(struct i2c_client *client, struct pmbus_limit_attr { u16 reg; /* Limit register */ bool update; /* True if register needs updates */ - bool low; /* True if low limit; for limits with compare - functions only */ const char *attr; /* Attribute name */ const char *alarm; /* Alarm attribute name */ u32 sbit; /* Alarm attribute status bit */ @@ -1031,8 +1029,7 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client, if (attr->compare) { pmbus_add_boolean_cmp(data, name, l->alarm, index, - l->low ? cindex : cbase, - l->low ? cbase : cindex, + cbase, cindex, attr->sbase + page, l->sbit); } else { pmbus_add_boolean_reg(data, name, @@ -1369,13 +1366,11 @@ static const struct pmbus_sensor_attr power_attributes[] = { static const struct pmbus_limit_attr temp_limit_attrs[] = { { .reg = PMBUS_UT_WARN_LIMIT, - .low = true, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, - .low = true, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, @@ -1404,13 +1399,11 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = { static const struct pmbus_limit_attr temp_limit_attrs23[] = { { .reg = PMBUS_UT_WARN_LIMIT, - .low = true, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, - .low = true, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, diff --git a/trunk/drivers/input/keyboard/adp5588-keys.c b/trunk/drivers/input/keyboard/adp5588-keys.c index e34eeb8ae371..7b404e5443ed 100644 --- a/trunk/drivers/input/keyboard/adp5588-keys.c +++ b/trunk/drivers/input/keyboard/adp5588-keys.c @@ -668,3 +668,4 @@ module_exit(adp5588_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); +MODULE_ALIAS("platform:adp5588-keys"); diff --git a/trunk/drivers/input/misc/cm109.c b/trunk/drivers/input/misc/cm109.c index ab860511f016..b09c7d127219 100644 --- a/trunk/drivers/input/misc/cm109.c +++ b/trunk/drivers/input/misc/cm109.c @@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) le16_to_cpu(dev->ctl_req->wIndex), dev->ctl_data, USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); - if (error < 0 && error != -EINTR) + if (error && error != EINTR) err("%s: usb_control_msg() failed %d", __func__, error); } diff --git a/trunk/drivers/input/mouse/bcm5974.c b/trunk/drivers/input/mouse/bcm5974.c index 
5ec617e28f7e..da280189ef07 100644 --- a/trunk/drivers/input/mouse/bcm5974.c +++ b/trunk/drivers/input/mouse/bcm5974.c @@ -67,10 +67,6 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 -/* MacbookAir4,1 (unibody, July 2011) */ -#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 -#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a -#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b /* MacbookAir4,2 (unibody, July 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d @@ -116,10 +112,6 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), - /* MacbookAir4,1 */ - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), - BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), /* MacbookAir4,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), @@ -342,18 +334,6 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } }, - { - USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, - USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, - USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, - HAS_INTEGRATED_BUTTON, - 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } - }, {} }; diff --git a/trunk/drivers/input/tablet/wacom_sys.c b/trunk/drivers/input/tablet/wacom_sys.c index 958b4eb6369d..d27c9d91630b 100644 --- a/trunk/drivers/input/tablet/wacom_sys.c +++ b/trunk/drivers/input/tablet/wacom_sys.c @@ -229,6 +229,13 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi get_unaligned_le16(&report[i + 3]); i += 4; } + } else if (usage == WCM_DIGITIZER) { + /* max pressure isn't reported + features->pressure_max = (unsigned short) + (report[i+4] << 8 | report[i + 3]); + */ + features->pressure_max = 255; + i += 4; } break; @@ -284,6 +291,13 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi pen = 1; i++; break; + + case HID_USAGE_UNDEFINED: + if (usage == WCM_DESKTOP && finger) /* capacity */ + features->pressure_max = + get_unaligned_le16(&report[i + 3]); + i += 4; + break; } break; diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 0dc97ec15c28..c1c2f7b28d89 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -800,26 +800,25 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) int i; for (i = 0; i < 2; i++) { - int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); - bool touch = data[offset + 3] & 0x80; + int p = data[9 * i + 2]; + bool touch = p && !wacom->shared->stylus_in_proximity; + input_mt_slot(input, i); + input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); /* * Touch events need to be disabled while stylus is * in proximity because user's hand is resting on touchpad * and sending unwanted events. User expects tablet buttons * to continue working though. 
*/ - touch = touch && !wacom->shared->stylus_in_proximity; - - input_mt_slot(input, i); - input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); if (touch) { - int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; - int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; + int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; + int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { x <<= 5; y <<= 5; } + input_report_abs(input, ABS_MT_PRESSURE, p); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); } @@ -1057,11 +1056,10 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, features->y_fuzz, 0); - - if (features->device_type == BTN_TOOL_PEN) { - input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, + input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, features->pressure_fuzz, 0); + if (features->device_type == BTN_TOOL_PEN) { /* penabled devices have fixed resolution for each model */ input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); @@ -1100,8 +1098,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); - - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case WACOM_21UX2: @@ -1130,9 +1126,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, } input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); - wacom_setup_cintiq(wacom_wac); break; @@ -1157,8 +1150,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, /* fall through */ case INTUOS: - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); - wacom_setup_intuos(wacom_wac); break; @@ -1174,8 +1165,6 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); wacom_setup_intuos(wacom_wac); - - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case TABLETPC2FG: @@ -1194,40 +1183,26 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, case TABLETPC: __clear_bit(ABS_MISC, input_dev->absbit); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); - if (features->device_type != BTN_TOOL_PEN) break; /* no need to process stylus stuff */ /* fall through */ case PL: + case PTU: case DTU: __set_bit(BTN_TOOL_PEN, input_dev->keybit); - __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); - - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); - break; - - case PTU: - __set_bit(BTN_STYLUS2, input_dev->keybit); /* fall through */ case PENPARTNER: - __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); - __set_bit(BTN_STYLUS, input_dev->keybit); - - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case BAMBOO_PT: __clear_bit(ABS_MISC, input_dev->absbit); - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); - if (features->device_type == BTN_TOOL_DOUBLETAP) { __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); diff --git a/trunk/drivers/input/touchscreen/wacom_w8001.c b/trunk/drivers/input/touchscreen/wacom_w8001.c index 9941d39df43d..c14412ef4648 100644 --- 
a/trunk/drivers/input/touchscreen/wacom_w8001.c +++ b/trunk/drivers/input/touchscreen/wacom_w8001.c @@ -383,8 +383,6 @@ static int w8001_setup(struct w8001 *w8001) dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); - __set_bit(INPUT_PROP_DIRECT, dev->propbit); - /* penabled? */ error = w8001_command(w8001, W8001_CMD_QUERY, true); if (!error) { diff --git a/trunk/drivers/leds/ledtrig-timer.c b/trunk/drivers/leds/ledtrig-timer.c index 328c64c0841c..d87c9d02f786 100644 --- a/trunk/drivers/leds/ledtrig-timer.c +++ b/trunk/drivers/leds/ledtrig-timer.c @@ -41,7 +41,6 @@ static ssize_t led_delay_on_store(struct device *dev, if (count == size) { led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); - led_cdev->blink_delay_on = state; ret = count; } @@ -70,7 +69,6 @@ static ssize_t led_delay_off_store(struct device *dev, if (count == size) { led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); - led_cdev->blink_delay_off = state; ret = count; } diff --git a/trunk/drivers/misc/pti.c b/trunk/drivers/misc/pti.c index 0b56e3f43573..06df1877ad0f 100644 --- a/trunk/drivers/misc/pti.c +++ b/trunk/drivers/misc/pti.c @@ -165,11 +165,6 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc, static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, const char *thread_name) { - /* - * Since we access the comm member in current's task_struct, we only - * need to be as large as what 'comm' in that structure is. - */ - char comm[TASK_COMM_LEN]; struct pti_masterchannel mccontrol = {.master = CONTROL_ID, .channel = 0}; const char *thread_name_p; @@ -177,6 +172,13 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, u8 control_frame[CONTROL_FRAME_LEN]; if (!thread_name) { + /* + * Since we access the comm member in current's task_struct, + * we only need to be as large as what 'comm' in that + * structure is. + */ + char comm[TASK_COMM_LEN]; + if (!in_interrupt()) get_task_comm(comm, current); else diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index f3f94a5c068f..b1187ff31d89 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -1351,8 +1351,7 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data) * will occur as normal. 
*/ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || - (dev->bus->self && - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) + dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) *smpss = 0; if (*smpss > dev->pcie_mpss) diff --git a/trunk/drivers/rtc/rtc-imxdi.c b/trunk/drivers/rtc/rtc-imxdi.c index d93a9608b1f0..2dd3c0163272 100644 --- a/trunk/drivers/rtc/rtc-imxdi.c +++ b/trunk/drivers/rtc/rtc-imxdi.c @@ -35,7 +35,6 @@ #include #include #include -#include #include /* DryIce Register Definitions */ diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 7639ab906f02..4e7c04e773e0 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -51,27 +51,6 @@ static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); -static void s3c_rtc_alarm_clk_enable(bool enable) -{ - static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); - static bool alarm_clk_enabled; - unsigned long irq_flags; - - spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); - if (enable) { - if (!alarm_clk_enabled) { - clk_enable(rtc_clk); - alarm_clk_enabled = true; - } - } else { - if (alarm_clk_enabled) { - clk_disable(rtc_clk); - alarm_clk_enabled = false; - } - } - spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); -} - /* IRQ Handlers */ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) @@ -85,9 +64,6 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); clk_disable(rtc_clk); - - s3c_rtc_alarm_clk_enable(false); - return IRQ_HANDLED; } @@ -121,8 +97,6 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); clk_disable(rtc_clk); - s3c_rtc_alarm_clk_enable(enabled); - return 0; } diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c b/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c index dba72a4e6a1c..9ae80cd5953b 100644 --- a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, nopout_wqe->itt = ((u16)task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_TMF_REQUEST_TYPE_SHIFT)); - nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); + nopout_wqe->ttt = nopout_hdr->ttt; nopout_wqe->flags = 0; if (!unsol) nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c index 5d0e9a24ae94..ba710e350ac5 100644 --- a/trunk/drivers/scsi/fcoe/fcoe.c +++ b/trunk/drivers/scsi/fcoe/fcoe.c @@ -432,8 +432,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) u8 flogi_maddr[ETH_ALEN]; const struct net_device_ops *ops; - rtnl_lock(); - /* * Don't listen for Ethernet packets anymore. 
* synchronize_net() ensures that the packet handlers are not running @@ -463,8 +461,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) " specific feature for LLD.\n"); } - rtnl_unlock(); - /* Release the self-reference taken during fcoe_interface_create() */ fcoe_interface_put(fcoe); } @@ -1955,8 +1951,11 @@ static void fcoe_destroy_work(struct work_struct *work) fcoe_if_destroy(port->lport); /* Do not tear down the fcoe interface for NPIV port */ - if (!npiv) + if (!npiv) { + rtnl_lock(); fcoe_interface_cleanup(fcoe); + rtnl_unlock(); + } mutex_unlock(&fcoe_config_mutex); } @@ -2010,9 +2009,8 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", netdev->name); rc = -EIO; - rtnl_unlock(); fcoe_interface_cleanup(fcoe); - goto out_nortnl; + goto out_nodev; } /* Make this the "master" N_Port */ @@ -2029,7 +2027,6 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) out_nodev: rtnl_unlock(); -out_nortnl: mutex_unlock(&fcoe_config_mutex); return rc; } diff --git a/trunk/drivers/scsi/hpsa.c b/trunk/drivers/scsi/hpsa.c index b200b736b000..ec61bdb833ac 100644 --- a/trunk/drivers/scsi/hpsa.c +++ b/trunk/drivers/scsi/hpsa.c @@ -676,16 +676,6 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); removed[*nremoved] = h->dev[entry]; (*nremoved)++; - - /* - * New physical devices won't have target/lun assigned yet - * so we need to preserve the values in the slot we are replacing. - */ - if (new_entry->target == -1) { - new_entry->target = h->dev[entry]->target; - new_entry->lun = h->dev[entry]->lun; - } - h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; @@ -1558,17 +1548,10 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, } static int hpsa_update_device_info(struct ctlr_info *h, - unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, - unsigned char *is_OBDR_device) + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) { - -#define OBDR_SIG_OFFSET 43 -#define OBDR_TAPE_SIG "$DR-10" -#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) -#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) - +#define OBDR_TAPE_INQ_SIZE 49 unsigned char *inq_buff; - unsigned char *obdr_sig; inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); if (!inq_buff) @@ -1600,16 +1583,6 @@ static int hpsa_update_device_info(struct ctlr_info *h, else this_device->raid_level = RAID_UNKNOWN; - if (is_OBDR_device) { - /* See if this is a One-Button-Disaster-Recovery device - * by looking for "$DR-10" at offset 43 in inquiry data. 
- */ - obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; - *is_OBDR_device = (this_device->devtype == TYPE_ROM && - strncmp(obdr_sig, OBDR_TAPE_SIG, - OBDR_SIG_LEN) == 0); - } - kfree(inq_buff); return 0; @@ -1743,7 +1716,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, return 0; } - if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) + if (hpsa_update_device_info(h, scsi3addr, this_device)) return 0; (*nmsa2xxx_enclosures)++; hpsa_set_bus_target_lun(this_device, bus, target, 0); @@ -1835,6 +1808,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) */ struct ReportLUNdata *physdev_list = NULL; struct ReportLUNdata *logdev_list = NULL; + unsigned char *inq_buff = NULL; u32 nphysicals = 0; u32 nlogicals = 0; u32 ndev_allocated = 0; @@ -1850,9 +1824,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) GFP_KERNEL); physdev_list = kzalloc(reportlunsize, GFP_KERNEL); logdev_list = kzalloc(reportlunsize, GFP_KERNEL); + inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); - if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { + if (!currentsd || !physdev_list || !logdev_list || + !inq_buff || !tmpdevice) { dev_err(&h->pdev->dev, "out of memory\n"); goto out; } @@ -1887,7 +1863,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) /* adjust our table of devices */ nmsa2xxx_enclosures = 0; for (i = 0; i < nphysicals + nlogicals + 1; i++) { - u8 *lunaddrbytes, is_OBDR = 0; + u8 *lunaddrbytes; /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, @@ -1898,8 +1874,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) continue; /* Get device type, vendor, model, device id */ - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, - &is_OBDR)) + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) continue; /* skip it if we can't talk to it. */ figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, tmpdevice); @@ -1923,7 +1898,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) hpsa_set_bus_target_lun(this_device, bus, target, lun); switch (this_device->devtype) { - case TYPE_ROM: + case TYPE_ROM: { /* We don't *really* support actual CD-ROM devices, * just "One Button Disaster Recovery" tape drive * which temporarily pretends to be a CD-ROM drive. @@ -1931,8 +1906,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) * device by checking for "$DR-10" in bytes 43-48 of * the inquiry data. */ - if (is_OBDR) - ncurrent++; + char obdr_sig[7]; +#define OBDR_TAPE_SIG "$DR-10" + strncpy(obdr_sig, &inq_buff[43], 6); + obdr_sig[6] = '\0'; + if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) + /* Not OBDR device, ignore it. 
*/ + break; + } + ncurrent++; break; case TYPE_DISK: if (i < nphysicals) @@ -1965,6 +1947,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) for (i = 0; i < ndev_allocated; i++) kfree(currentsd[i]); kfree(currentsd); + kfree(inq_buff); kfree(physdev_list); kfree(logdev_list); } diff --git a/trunk/drivers/scsi/isci/host.c b/trunk/drivers/scsi/isci/host.c index 6981b773a88d..26072f1e9852 100644 --- a/trunk/drivers/scsi/isci/host.c +++ b/trunk/drivers/scsi/isci/host.c @@ -531,9 +531,6 @@ static void sci_controller_process_completions(struct isci_host *ihost) break; case SCU_COMPLETION_TYPE_EVENT: - sci_controller_event_completion(ihost, ent); - break; - case SCU_COMPLETION_TYPE_NOTIFY: { event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); @@ -1094,7 +1091,6 @@ static void isci_host_completion_routine(unsigned long data) struct isci_request *request; struct isci_request *next_request; struct sas_task *task; - u16 active; INIT_LIST_HEAD(&completed_request_list); INIT_LIST_HEAD(&errored_request_list); @@ -1185,13 +1181,6 @@ static void isci_host_completion_routine(unsigned long data) } } - /* the coalesence timeout doubles at each encoding step, so - * update it based on the ilog2 value of the outstanding requests - */ - active = isci_tci_active(ihost); - writel(SMU_ICC_GEN_VAL(NUMBER, active) | - SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), - &ihost->smu_registers->interrupt_coalesce_control); } /** @@ -1482,7 +1471,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); /* set the default interrupt coalescence number and timeout value. */ - sci_controller_set_interrupt_coalescence(ihost, 0, 0); + sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); } static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) diff --git a/trunk/drivers/scsi/isci/host.h b/trunk/drivers/scsi/isci/host.h index 9f33831a2f04..062101a39f79 100644 --- a/trunk/drivers/scsi/isci/host.h +++ b/trunk/drivers/scsi/isci/host.h @@ -369,9 +369,6 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) -/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ -#define ISCI_COALESCE_BASE 9 - /* expander attached sata devices require 3 rnc slots */ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) { diff --git a/trunk/drivers/scsi/isci/init.c b/trunk/drivers/scsi/isci/init.c index 29aa34efb0f5..61e0d09e2b57 100644 --- a/trunk/drivers/scsi/isci/init.c +++ b/trunk/drivers/scsi/isci/init.c @@ -59,19 +59,10 @@ #include #include #include -#include #include "isci.h" #include "task.h" #include "probe_roms.h" -#define MAJ 1 -#define MIN 0 -#define BUILD 0 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ - __stringify(BUILD) - -MODULE_VERSION(DRV_VERSION); - static struct scsi_transport_template *isci_transport_template; static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { @@ -122,22 +113,6 @@ unsigned char max_concurr_spinup = 1; module_param(max_concurr_spinup, byte, 0); MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); -static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); - struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); - struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); - - return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); -} - -static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); - -struct device_attribute *isci_host_attrs[] = { - &dev_attr_isci_id, - NULL -}; - static struct scsi_host_template isci_sht = { .module = THIS_MODULE, @@ -163,7 +138,6 @@ static struct scsi_host_template isci_sht = { .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, - .shost_attrs = isci_host_attrs, }; static struct sas_domain_function_template isci_transport_ops = { @@ -258,6 +232,17 @@ static int isci_register_sas_ha(struct isci_host *isci_host) return 0; } +static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); + + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); +} + +static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); + static void isci_unregister(struct isci_host *isci_host) { struct Scsi_Host *shost; @@ -266,6 +251,7 @@ static void isci_unregister(struct isci_host *isci_host) return; shost = isci_host->shost; + device_remove_file(&shost->shost_dev, &dev_attr_isci_id); sas_unregister_ha(&isci_host->sas_ha); @@ -429,8 +415,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) if (err) goto err_shost_remove; + err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); + if (err) + goto err_unregister_ha; + return isci_host; + err_unregister_ha: + sas_unregister_ha(&(isci_host->sas_ha)); err_shost_remove: scsi_remove_host(shost); err_shost: @@ -548,8 +540,7 @@ static __init int isci_init(void) { int err; - pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", - DRV_NAME, DRV_VERSION); + pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); if (!isci_transport_template) diff --git a/trunk/drivers/scsi/isci/phy.c b/trunk/drivers/scsi/isci/phy.c index 430fc8ff014a..79313a7a2356 100644 --- a/trunk/drivers/scsi/isci/phy.c +++ b/trunk/drivers/scsi/isci/phy.c @@ -104,7 +104,6 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; - u32 sp_timeouts = 0; iphy->link_layer_registers = reg; @@ -212,18 +211,6 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); writel(llctl, &iphy->link_layer_registers->link_layer_control); - sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); - - /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ - sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); - - /* Set RATE_CHANGE timeout value to 0x3B (59us). 
This ensures SCU can - * lock with 3Gb drive when SCU max rate is set to 1.5Gb. - */ - sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); - - writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); - if (is_a2(ihost->pdev)) { /* Program the max ARB time for the PHY to 700us so we inter-operate with * the PMC expander which shuts down PHYs if the expander PHY generates too diff --git a/trunk/drivers/scsi/isci/registers.h b/trunk/drivers/scsi/isci/registers.h index 00afc738bbed..9b266c7428e8 100644 --- a/trunk/drivers/scsi/isci/registers.h +++ b/trunk/drivers/scsi/isci/registers.h @@ -1299,18 +1299,6 @@ struct scu_transport_layer_registers { #define SCU_AFE_XCVRCR_OFFSET 0x00DC #define SCU_AFE_LUTCR_OFFSET 0x00E0 -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) -#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) - -#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ - SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) - #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) diff --git a/trunk/drivers/scsi/isci/request.c b/trunk/drivers/scsi/isci/request.c index b5d3a8c4d329..a46e07ac789f 100644 --- a/trunk/drivers/scsi/isci/request.c +++ b/trunk/drivers/scsi/isci/request.c @@ -732,20 +732,12 @@ sci_io_request_terminate(struct isci_request *ireq) sci_change_state(&ireq->sm, SCI_REQ_ABORTING); return SCI_SUCCESS; case SCI_REQ_TASK_WAIT_TC_RESP: - /* The task frame was already confirmed to have been - * sent by the SCU HW. Since the state machine is - * now only waiting for the task response itself, - * abort the request and complete it immediately - * and don't wait for the task response. - */ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_ABORTING: - /* If a request has a termination requested twice, return - * a failure indication, since HW confirmation of the first - * abort is still outstanding. - */ + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: dev_warn(&ireq->owning_controller->pdev->dev, @@ -2407,19 +2399,22 @@ static void isci_task_save_for_upper_layer_completion( } } -static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) +static void isci_request_process_stp_response(struct sas_task *task, + void *response_buffer) { + struct dev_to_host_fis *d2h_reg_fis = response_buffer; struct task_status_struct *ts = &task->task_status; struct ata_task_resp *resp = (void *)&ts->buf[0]; - resp->frame_len = sizeof(*fis); - memcpy(resp->ending_fis, fis, sizeof(*fis)); + resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); + memcpy(&resp->ending_fis[0], response_buffer + 16, 24); ts->buf_valid_size = sizeof(*resp); - /* If the device fault bit is set in the status register, then + /** + * If the device fault bit is set in the status register, then * set the sense data and return. 
*/ - if (fis->status & ATA_DF) + if (d2h_reg_fis->status & ATA_DF) ts->stat = SAS_PROTO_RESPONSE; else ts->stat = SAM_STAT_GOOD; @@ -2433,6 +2428,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost, { struct sas_task *task = isci_request_access_task(request); struct ssp_response_iu *resp_iu; + void *resp_buf; unsigned long task_flags; struct isci_remote_device *idev = isci_lookup_device(task->dev); enum service_response response = SAS_TASK_UNDELIVERED; @@ -2569,7 +2565,9 @@ static void isci_request_io_request_complete(struct isci_host *ihost, task); if (sas_protocol_ata(task->task_proto)) { - isci_process_stp_response(task, &request->stp.rsp); + resp_buf = &request->stp.rsp; + isci_request_process_stp_response(task, + resp_buf); } else if (SAS_PROTOCOL_SSP == task->task_proto) { /* crack the iu response buffer. */ diff --git a/trunk/drivers/scsi/isci/unsolicited_frame_control.c b/trunk/drivers/scsi/isci/unsolicited_frame_control.c index 16f88ab939c8..e9e1e2abacb9 100644 --- a/trunk/drivers/scsi/isci/unsolicited_frame_control.c +++ b/trunk/drivers/scsi/isci/unsolicited_frame_control.c @@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) */ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); /* * The Unsolicited Frame buffers are set at the start of the UF diff --git a/trunk/drivers/scsi/isci/unsolicited_frame_control.h b/trunk/drivers/scsi/isci/unsolicited_frame_control.h index 75d896686f5a..31cb9506f52d 100644 --- a/trunk/drivers/scsi/isci/unsolicited_frame_control.h +++ b/trunk/drivers/scsi/isci/unsolicited_frame_control.h @@ -214,7 +214,7 @@ struct sci_uf_address_table_array { * starting address of the UF address table. * 64-bit pointers are required by the hardware. */ - u64 *array; + dma_addr_t *array; /** * This field specifies the physical address location for the UF diff --git a/trunk/drivers/scsi/libfc/fc_exch.c b/trunk/drivers/scsi/libfc/fc_exch.c index d261e982a2fa..01ff082dc34c 100644 --- a/trunk/drivers/scsi/libfc/fc_exch.c +++ b/trunk/drivers/scsi/libfc/fc_exch.c @@ -494,9 +494,6 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, */ error = lport->tt.frame_send(lport, fp); - if (fh->fh_type == FC_TYPE_BLS) - return error; - /* * Update the exchange and sequence flags, * assuming all frames for the sequence have been sent. @@ -578,35 +575,42 @@ static void fc_seq_set_resp(struct fc_seq *sp, } /** - * fc_exch_abort_locked() - Abort an exchange - * @ep: The exchange to be aborted + * fc_seq_exch_abort() - Abort an exchange and sequence + * @req_sp: The sequence to be aborted * @timer_msec: The period of time to wait before aborting * - * Locking notes: Called with exch lock held - * - * Return value: 0 on success else error code + * Generally called because of a timeout or an abort from the upper layer. 
*/ -static int fc_exch_abort_locked(struct fc_exch *ep, - unsigned int timer_msec) +static int fc_seq_exch_abort(const struct fc_seq *req_sp, + unsigned int timer_msec) { struct fc_seq *sp; + struct fc_exch *ep; struct fc_frame *fp; int error; + ep = fc_seq_exch(req_sp); + + spin_lock_bh(&ep->ex_lock); if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || - ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { + spin_unlock_bh(&ep->ex_lock); return -ENXIO; + } /* * Send the abort on a new sequence if possible. */ sp = fc_seq_start_next_locked(&ep->seq); - if (!sp) + if (!sp) { + spin_unlock_bh(&ep->ex_lock); return -ENOMEM; + } ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; if (timer_msec) fc_exch_timer_set_locked(ep, timer_msec); + spin_unlock_bh(&ep->ex_lock); /* * If not logged into the fabric, don't send ABTS but leave @@ -628,28 +632,6 @@ static int fc_exch_abort_locked(struct fc_exch *ep, return error; } -/** - * fc_seq_exch_abort() - Abort an exchange and sequence - * @req_sp: The sequence to be aborted - * @timer_msec: The period of time to wait before aborting - * - * Generally called because of a timeout or an abort from the upper layer. - * - * Return value: 0 on success else error code - */ -static int fc_seq_exch_abort(const struct fc_seq *req_sp, - unsigned int timer_msec) -{ - struct fc_exch *ep; - int error; - - ep = fc_seq_exch(req_sp); - spin_lock_bh(&ep->ex_lock); - error = fc_exch_abort_locked(ep, timer_msec); - spin_unlock_bh(&ep->ex_lock); - return error; -} - /** * fc_exch_timeout() - Handle exchange timer expiration * @work: The work_struct identifying the exchange that timed out @@ -1733,7 +1715,6 @@ static void fc_exch_reset(struct fc_exch *ep) int rc = 1; spin_lock_bh(&ep->ex_lock); - fc_exch_abort_locked(ep, 0); ep->state |= FC_EX_RST_CLEANUP; if (cancel_delayed_work(&ep->timeout_work)) atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ @@ -1981,7 +1962,6 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, struct fc_exch *ep; struct fc_seq *sp = NULL; struct fc_frame_header *fh; - struct fc_fcp_pkt *fsp = NULL; int rc = 1; ep = fc_exch_alloc(lport, fp); @@ -2004,10 +1984,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, fc_exch_setup_hdr(ep, fp, ep->f_ctl); sp->cnt++; - if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { - fsp = fr_fsp(fp); + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); - } if (unlikely(lport->tt.frame_send(lport, fp))) goto err; @@ -2021,8 +1999,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport, spin_unlock_bh(&ep->ex_lock); return sp; err: - if (fsp) - fc_fcp_ddp_done(fsp); + fc_fcp_ddp_done(fr_fsp(fp)); rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) diff --git a/trunk/drivers/scsi/libfc/fc_fcp.c b/trunk/drivers/scsi/libfc/fc_fcp.c index 4c41ee816f0b..afb63c843144 100644 --- a/trunk/drivers/scsi/libfc/fc_fcp.c +++ b/trunk/drivers/scsi/libfc/fc_fcp.c @@ -2019,11 +2019,6 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) struct fc_fcp_internal *si; int rc = FAILED; unsigned long flags; - int rval; - - rval = fc_block_scsi_eh(sc_cmd); - if (rval) - return rval; lport = shost_priv(sc_cmd->device->host); if (lport->state != LPORT_ST_READY) @@ -2073,9 +2068,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) int rc = FAILED; int rval; - rval = fc_block_scsi_eh(sc_cmd); + rval = fc_remote_port_chkready(rport); if (rval) - return rval; + goto out; 
lport = shost_priv(sc_cmd->device->host); @@ -2121,8 +2116,6 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) FC_SCSI_DBG(lport, "Resetting host\n"); - fc_block_scsi_eh(sc_cmd); - lport->tt.lport_reset(lport); wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, diff --git a/trunk/drivers/scsi/libfc/fc_lport.c b/trunk/drivers/scsi/libfc/fc_lport.c index 628f347404f9..e55ed9cf23fb 100644 --- a/trunk/drivers/scsi/libfc/fc_lport.c +++ b/trunk/drivers/scsi/libfc/fc_lport.c @@ -88,7 +88,6 @@ */ #include -#include #include #include @@ -1030,16 +1029,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport) FCH_EVT_LIPRESET, 0); fc_vports_linkchange(lport); fc_lport_reset_locked(lport); - if (lport->link_up) { - /* - * Wait upto resource allocation time out before - * doing re-login since incomplete FIP exchanged - * from last session may collide with exchanges - * in new session. - */ - msleep(lport->r_a_tov); + if (lport->link_up) fc_lport_enter_flogi(lport); - } } /** diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c index a31e05f3bfd4..7836eb01c7fc 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_attr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c @@ -1786,16 +1786,13 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); } - if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { - int prot = 0; vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_user, vha, 0x7082, "Registered for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(vha->host, - prot | SHOST_DIF_TYPE1_PROTECTION + SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c index d79cd8a5f831..2155071f3100 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c +++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c @@ -8,24 +8,24 @@ /* * Table for showing the current message id in use for particular level * Change this table for addition of log/debug messages. 
- * ---------------------------------------------------------------------- - * | Level | Last Value Used | Holes | - * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x0116 | | - * | Mailbox commands | 0x1126 | | - * | Device Discovery | 0x2083 | | - * | Queue Command and IO tracing | 0x302e | 0x3008 | - * | DPC Thread | 0x401c | | - * | Async Events | 0x5059 | | - * | Timer Routines | 0x600d | | - * | User Space Interactions | 0x709d | | - * | Task Management | 0x8041 | | - * | AER/EEH | 0x900f | | - * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb04f | | - * | MultiQ | 0xc00b | | - * | Misc | 0xd00b | | - * ---------------------------------------------------------------------- + * ----------------------------------------------------- + * | Level | Last Value Used | + * ----------------------------------------------------- + * | Module Init and Probe | 0x0116 | + * | Mailbox commands | 0x111e | + * | Device Discovery | 0x2083 | + * | Queue Command and IO tracing | 0x302e | + * | DPC Thread | 0x401c | + * | Async Events | 0x5059 | + * | Timer Routines | 0x600d | + * | User Space Interactions | 0x709c | + * | Task Management | 0x8043 | + * | AER/EEH | 0x900f | + * | Virtual Port | 0xa007 | + * | ISP82XX Specific | 0xb027 | + * | MultiQ | 0xc00b | + * | Misc | 0xd00b | + * ----------------------------------------------------- */ #include "qla_def.h" diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h index a03eaf40f377..cc5a79259d33 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_def.h +++ b/trunk/drivers/scsi/qla2xxx/qla_def.h @@ -2529,7 +2529,6 @@ struct qla_hw_data { #define DT_ISP8021 BIT_14 #define DT_ISP_LAST (DT_ISP8021 << 1) -#define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 #define DT_FWI2 BIT_27 #define DT_ZIO_SUPPORTED BIT_28 @@ -2573,7 +2572,6 @@ struct qla_hw_data { #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) -#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) diff --git a/trunk/drivers/scsi/qla2xxx/qla_fw.h b/trunk/drivers/scsi/qla2xxx/qla_fw.h index aa69486dc064..691783abfb69 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_fw.h +++ b/trunk/drivers/scsi/qla2xxx/qla_fw.h @@ -537,11 +537,6 @@ struct sts_entry_24xx { /* * If DIF Error is set in comp_status, these additional fields are * defined: - * - * !!! NOTE: Firmware sends expected/actual DIF data in big endian - * format; but all of the "data" field gets swab32-d in the beginning - * of qla2x00_status_entry(). - * * &data[10] : uint8_t report_runt_bg[2]; - computed guard * &data[12] : uint8_t actual_dif[8]; - DIF Data received * &data[20] : uint8_t expected_dif[8]; - DIF Data computed diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c index 37da04d3db26..def694271bf7 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_init.c +++ b/trunk/drivers/scsi/qla2xxx/qla_init.c @@ -3838,12 +3838,15 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) req = vha->req; rsp = req->rsp; + atomic_set(&vha->loop_state, LOOP_UPDATE); clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); if (vha->flags.online) { if (!(rval = qla2x00_fw_ready(vha))) { /* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ wait_time = 256; do { + atomic_set(&vha->loop_state, LOOP_UPDATE); + /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); diff --git a/trunk/drivers/scsi/qla2xxx/qla_inline.h b/trunk/drivers/scsi/qla2xxx/qla_inline.h index 9902834e0b74..d2e904bc21c0 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_inline.h +++ b/trunk/drivers/scsi/qla2xxx/qla_inline.h @@ -102,32 +102,3 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) fcport->d_id.b.al_pa); } } - -static inline int -qla2x00_hba_err_chk_enabled(srb_t *sp) -{ - /* - * Uncomment when corresponding SCSI changes are done. - * - if (!sp->cmd->prot_chk) - return 0; - * - */ - - switch (scsi_get_prot_op(sp->cmd)) { - case SCSI_PROT_READ_STRIP: - case SCSI_PROT_WRITE_INSERT: - if (ql2xenablehba_err_chk >= 1) - return 1; - break; - case SCSI_PROT_READ_PASS: - case SCSI_PROT_WRITE_PASS: - if (ql2xenablehba_err_chk >= 2) - return 1; - break; - case SCSI_PROT_READ_INSERT: - case SCSI_PROT_WRITE_STRIP: - return 1; - } - return 0; -} diff --git a/trunk/drivers/scsi/qla2xxx/qla_iocb.c b/trunk/drivers/scsi/qla2xxx/qla_iocb.c index dbec89622a0f..49d6906af886 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_iocb.c +++ b/trunk/drivers/scsi/qla2xxx/qla_iocb.c @@ -709,28 +709,20 @@ struct fw_dif_context { * */ static inline void -qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, +qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, unsigned int protcnt) { - struct scsi_cmnd *cmd = sp->cmd; + struct sd_dif_tuple *spt; scsi_qla_host_t *vha = shost_priv(cmd->device->host); + unsigned char op = scsi_get_prot_op(cmd); switch (scsi_get_prot_type(cmd)) { + /* For TYPE 0 protection: no checking */ case SCSI_PROT_DIF_TYPE0: - /* - * No check for ql2xenablehba_err_chk, as it would be an - * I/O error if hba tag generation is not done. - */ - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - - if (!qla2x00_hba_err_chk_enabled(sp)) - break; - - pkt->ref_tag_mask[0] = 0xff; - pkt->ref_tag_mask[1] = 0xff; - pkt->ref_tag_mask[2] = 0xff; - pkt->ref_tag_mask[3] = 0xff; + pkt->ref_tag_mask[0] = 0x00; + pkt->ref_tag_mask[1] = 0x00; + pkt->ref_tag_mask[2] = 0x00; + pkt->ref_tag_mask[3] = 0x00; break; /* @@ -738,16 +730,20 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, * match LBA in CDB + N */ case SCSI_PROT_DIF_TYPE2: - pkt->app_tag = __constant_cpu_to_le16(0); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; + if (!ql2xenablehba_err_chk) + break; + + if (scsi_prot_sg_count(cmd)) { + spt = page_address(sg_page(scsi_prot_sglist(cmd))) + + scsi_prot_sglist(cmd)[0].offset; + pkt->app_tag = swab32(spt->app_tag); + pkt->app_tag_mask[0] = 0xff; + pkt->app_tag_mask[1] = 0xff; + } pkt->ref_tag = cpu_to_le32((uint32_t) (0xffffffff & scsi_get_lba(cmd))); - if (!qla2x00_hba_err_chk_enabled(sp)) - break; - /* enable ALL bytes of the ref tag */ pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; @@ -767,15 +763,26 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, * 16 bit app tag. 
*/ case SCSI_PROT_DIF_TYPE1: - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - pkt->app_tag = __constant_cpu_to_le16(0); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; - - if (!qla2x00_hba_err_chk_enabled(sp)) + if (!ql2xenablehba_err_chk) break; + if (protcnt && (op == SCSI_PROT_WRITE_STRIP || + op == SCSI_PROT_WRITE_PASS)) { + spt = page_address(sg_page(scsi_prot_sglist(cmd))) + + scsi_prot_sglist(cmd)[0].offset; + ql_dbg(ql_dbg_io, vha, 0x3008, + "LBA from user %p, lba = 0x%x for cmd=%p.\n", + spt, (int)spt->ref_tag, cmd); + pkt->ref_tag = swab32(spt->ref_tag); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; + } else { + pkt->ref_tag = cpu_to_le32((uint32_t) + (0xffffffff & scsi_get_lba(cmd))); + pkt->app_tag = __constant_cpu_to_le16(0); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; + } /* enable ALL bytes of the ref tag */ pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; @@ -791,161 +798,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, scsi_get_prot_type(cmd), cmd); } -struct qla2_sgx { - dma_addr_t dma_addr; /* OUT */ - uint32_t dma_len; /* OUT */ - - uint32_t tot_bytes; /* IN */ - struct scatterlist *cur_sg; /* IN */ - - /* for book keeping, bzero on initial invocation */ - uint32_t bytes_consumed; - uint32_t num_bytes; - uint32_t tot_partial; - - /* for debugging */ - uint32_t num_sg; - srb_t *sp; -}; -static int -qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, - uint32_t *partial) -{ - struct scatterlist *sg; - uint32_t cumulative_partial, sg_len; - dma_addr_t sg_dma_addr; - - if (sgx->num_bytes == sgx->tot_bytes) - return 0; - - sg = sgx->cur_sg; - cumulative_partial = sgx->tot_partial; - - sg_dma_addr = sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; - - if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { - sgx->dma_len = (blk_sz - cumulative_partial); - sgx->tot_partial = 0; - sgx->num_bytes += blk_sz; - *partial = 0; - } else { - sgx->dma_len = sg_len - sgx->bytes_consumed; - sgx->tot_partial += sgx->dma_len; - *partial = 1; - } - - sgx->bytes_consumed += sgx->dma_len; - - if (sg_len == sgx->bytes_consumed) { - sg = sg_next(sg); - sgx->num_sg++; - sgx->cur_sg = sg; - sgx->bytes_consumed = 0; - } - - return 1; -} - -static int -qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds) -{ - void *next_dsd; - uint8_t avail_dsds = 0; - uint32_t dsd_list_len; - struct dsd_dma *dsd_ptr; - struct scatterlist *sg_prot; - uint32_t *cur_dsd = dsd; - uint16_t used_dsds = tot_dsds; - - uint32_t prot_int; - uint32_t partial; - struct qla2_sgx sgx; - dma_addr_t sle_dma; - uint32_t sle_dma_len, tot_prot_dma_len = 0; - struct scsi_cmnd *cmd = sp->cmd; - - prot_int = cmd->device->sector_size; - - memset(&sgx, 0, sizeof(struct qla2_sgx)); - sgx.tot_bytes = scsi_bufflen(sp->cmd); - sgx.cur_sg = scsi_sglist(sp->cmd); - sgx.sp = sp; - - sg_prot = scsi_prot_sglist(sp->cmd); - - while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { - - sle_dma = sgx.dma_addr; - sle_dma_len = sgx.dma_len; -alloc_and_fill: - /* Allocate additional continuation packets? */ - if (avail_dsds == 0) { - avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
- QLA_DSDS_PER_IOCB : used_dsds; - dsd_list_len = (avail_dsds + 1) * 12; - used_dsds -= avail_dsds; - - /* allocate tracking DS */ - dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); - if (!dsd_ptr) - return 1; - - /* allocate new list */ - dsd_ptr->dsd_addr = next_dsd = - dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, - &dsd_ptr->dsd_list_dma); - - if (!next_dsd) { - /* - * Need to cleanup only this dsd_ptr, rest - * will be done by sp_free_dma() - */ - kfree(dsd_ptr); - return 1; - } - - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->ctx)->dsd_list); - - sp->flags |= SRB_CRC_CTX_DSD_VALID; - - /* add new list to cmd iocb or last list */ - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *cur_dsd++ = dsd_list_len; - cur_dsd = (uint32_t *)next_dsd; - } - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(sle_dma_len); - avail_dsds--; - - if (partial == 0) { - /* Got a full protection interval */ - sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; - sle_dma_len = 8; - - tot_prot_dma_len += sle_dma_len; - if (tot_prot_dma_len == sg_dma_len(sg_prot)) { - tot_prot_dma_len = 0; - sg_prot = sg_next(sg_prot); - } - - partial = 1; /* So as to not re-enter this block */ - goto alloc_and_fill; - } - } - /* Null termination */ - *cur_dsd++ = 0; - *cur_dsd++ = 0; - *cur_dsd++ = 0; - return 0; -} static int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, uint16_t tot_dsds) @@ -1128,7 +981,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *cur_seg; int sgc; - uint32_t total_bytes = 0; + uint32_t total_bytes; uint32_t data_bytes; uint32_t dif_bytes; uint8_t bundling = 1; @@ -1170,10 +1023,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, __constant_cpu_to_le16(CF_READ_DATA); } - if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || - (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || - (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || - (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) + tot_prot_dsds = scsi_prot_sg_count(cmd); + if (!tot_prot_dsds) bundling = 0; /* Allocate CRC context from global pool */ @@ -1196,7 +1047,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); - qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) + qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) &crc_ctx_pkt->ref_tag, tot_prot_dsds); cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); @@ -1225,6 +1076,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); + host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( @@ -1255,28 +1107,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ /* Compute dif len and adjust data len to incude protection */ + total_bytes = data_bytes; dif_bytes = 0; blk_size = cmd->device->sector_size; - dif_bytes = (data_bytes / blk_size) * 8; - - switch (scsi_get_prot_op(sp->cmd)) { - case SCSI_PROT_READ_INSERT: - case SCSI_PROT_WRITE_STRIP: - total_bytes = 
data_bytes; - data_bytes += dif_bytes; - break; - - case SCSI_PROT_READ_STRIP: - case SCSI_PROT_WRITE_INSERT: - case SCSI_PROT_READ_PASS: - case SCSI_PROT_WRITE_PASS: - total_bytes = data_bytes + dif_bytes; - break; - default: - BUG(); + if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + dif_bytes = (data_bytes / blk_size) * 8; + total_bytes += dif_bytes; } - if (!qla2x00_hba_err_chk_enabled(sp)) + if (!ql2xenablehba_err_chk) fw_prot_opts |= 0x10; /* Disable Guard tag checking */ if (!bundling) { @@ -1312,12 +1151,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->control_flags |= __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); - - if (!bundling && tot_prot_dsds) { - if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, - cur_dsd, tot_dsds)) - goto crc_queuing_error; - } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, + if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, (tot_dsds - tot_prot_dsds))) goto crc_queuing_error; @@ -1580,22 +1414,6 @@ qla24xx_dif_start_scsi(srb_t *sp) goto queuing_error; else sp->flags |= SRB_DMA_VALID; - - if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || - (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { - struct qla2_sgx sgx; - uint32_t partial; - - memset(&sgx, 0, sizeof(struct qla2_sgx)); - sgx.tot_bytes = scsi_bufflen(cmd); - sgx.cur_sg = scsi_sglist(cmd); - sgx.sp = sp; - - nseg = 0; - while (qla24xx_get_one_block_sg( - cmd->device->sector_size, &sgx, &partial)) - nseg++; - } } else nseg = 0; @@ -1610,11 +1428,6 @@ qla24xx_dif_start_scsi(srb_t *sp) goto queuing_error; else sp->flags |= SRB_CRC_PROT_DMA_VALID; - - if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || - (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { - nseg = scsi_bufflen(cmd) / cmd->device->sector_size; - } } else { nseg = 0; } @@ -1641,7 +1454,6 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Build header part of command packet (excluding the OPCODE). */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; - sp->handle = handle; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c index 646fc5263d50..b16b7725dee0 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_isr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c @@ -719,6 +719,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) vha->flags.rscn_queue_overflow = 1; } + atomic_set(&vha->loop_state, LOOP_UPDATE); atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; @@ -1434,27 +1435,25 @@ struct scsi_dif_tuple { * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST * to indicate to the kernel that the HBA detected error. 
*/ -static inline int +static inline void qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) { struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_cmnd *cmd = sp->cmd; - uint8_t *ap = &sts24->data[12]; - uint8_t *ep = &sts24->data[20]; + struct scsi_dif_tuple *ep = + (struct scsi_dif_tuple *)&sts24->data[20]; + struct scsi_dif_tuple *ap = + (struct scsi_dif_tuple *)&sts24->data[12]; uint32_t e_ref_tag, a_ref_tag; uint16_t e_app_tag, a_app_tag; uint16_t e_guard, a_guard; - /* - * swab32 of the "data" field in the beginning of qla2x00_status_entry() - * would make guard field appear at offset 2 - */ - a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); - a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); - a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); - e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); - e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); - e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); + e_ref_tag = be32_to_cpu(ep->ref_tag); + a_ref_tag = be32_to_cpu(ap->ref_tag); + e_app_tag = be16_to_cpu(ep->app_tag); + a_app_tag = be16_to_cpu(ap->app_tag); + e_guard = be16_to_cpu(ep->guard); + a_guard = be16_to_cpu(ap->guard); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); @@ -1466,63 +1465,6 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); - /* - * Ignore sector if: - * For type 3: ref & app tag is all 'f's - * For type 0,1,2: app tag is all 'f's - */ - if ((a_app_tag == 0xffff) && - ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || - (a_ref_tag == 0xffffffff))) { - uint32_t blocks_done, resid; - sector_t lba_s = scsi_get_lba(cmd); - - /* 2TB boundary case covered automatically with this */ - blocks_done = e_ref_tag - (uint32_t)lba_s + 1; - - resid = scsi_bufflen(cmd) - (blocks_done * - cmd->device->sector_size); - - scsi_set_resid(cmd, resid); - cmd->result = DID_OK << 16; - - /* Update protection tag */ - if (scsi_prot_sg_count(cmd)) { - uint32_t i, j = 0, k = 0, num_ent; - struct scatterlist *sg; - struct sd_dif_tuple *spt; - - /* Patch the corresponding protection tags */ - scsi_for_each_prot_sg(cmd, sg, - scsi_prot_sg_count(cmd), i) { - num_ent = sg_dma_len(sg) / 8; - if (k + num_ent < blocks_done) { - k += num_ent; - continue; - } - j = blocks_done - k - 1; - k = blocks_done; - break; - } - - if (k != blocks_done) { - qla_printk(KERN_WARNING, sp->fcport->vha->hw, - "unexpected tag values tag:lba=%x:%lx)\n", - e_ref_tag, lba_s); - return 1; - } - - spt = page_address(sg_page(sg)) + sg->offset; - spt += j; - - spt->app_tag = 0xffff; - if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = 0xffffffff; - } - - return 0; - } - /* check guard */ if (e_guard != a_guard) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, @@ -1530,30 +1472,28 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return 1; + return; } - /* check ref tag */ - if (e_ref_tag != a_ref_tag) { + /* check appl tag */ + if (e_app_tag != a_app_tag) { scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x3); + 0x10, 0x2); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return 1; + return; } - /* check appl tag */ - if (e_app_tag != a_app_tag) { + /* check ref tag */ + if (e_ref_tag != a_ref_tag) { scsi_build_sense_buffer(1, 
cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x2); + 0x10, 0x3); set_driver_byte(cmd, DRIVER_SENSE); set_host_byte(cmd, DID_ABORT); cmd->result |= SAM_STAT_CHECK_CONDITION << 1; - return 1; + return; } - - return 1; } /** @@ -1827,7 +1767,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; case CS_DIF_ERROR: - logit = qla2x00_handle_dif_error(sp, sts24); + qla2x00_handle_dif_error(sp, sts24); break; default: cp->result = DID_ERROR << 16; @@ -2528,10 +2468,11 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) goto skip_msi; } - if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { + if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || + !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { ql_log(ql_log_warn, vha, 0x0035, "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", - ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); + ha->pdev->revision, ha->fw_attributes); goto skip_msix; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_mid.c b/trunk/drivers/scsi/qla2xxx/qla_mid.c index f488cc69fc79..c706ed370000 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mid.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mid.c @@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) host->can_queue = base_vha->req->length + 128; host->this_id = 255; host->cmd_per_lun = 3; - if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) + if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; diff --git a/trunk/drivers/scsi/qla2xxx/qla_nx.c b/trunk/drivers/scsi/qla2xxx/qla_nx.c index 049807cda419..5cbf33a50b14 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_nx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_nx.c @@ -2208,7 +2208,6 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; - unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -2219,11 +2218,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; reg = &ha->iobase->isp82; - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irq(&ha->hardware_lock); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(®->host_int, 0); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irq(&ha->hardware_lock); return IRQ_HANDLED; } @@ -2839,16 +2838,6 @@ qla82xx_start_scsi(srb_t *sp) int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); - /* build FCP_CMND IU */ - memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); - int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); - ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; - - if (cmd->sc_data_direction == DMA_TO_DEVICE) - ctx->fcp_cmnd->additional_cdb_len |= 1; - else if (cmd->sc_data_direction == DMA_FROM_DEVICE) - ctx->fcp_cmnd->additional_cdb_len |= 2; - /* * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 
*/ @@ -2865,6 +2854,16 @@ qla82xx_start_scsi(srb_t *sp) } } + /* build FCP_CMND IU */ + memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 2; + memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c04..e02df276804e 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -106,21 +106,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to report for target devices."); /* Do not change the value of this after module load */ -int ql2xenabledif = 0; +int ql2xenabledif = 1; module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenabledif, " Enable T10-CRC-DIF " - " Default is 0 - No DIF Support. 1 - Enable it" - ", 2 - Enable DIF for all types, except Type 0."); + " Default is 0 - No DIF Support. 1 - Enable it"); -int ql2xenablehba_err_chk = 2; +int ql2xenablehba_err_chk; module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenablehba_err_chk, - " Enable T10-CRC-DIF Error isolation by HBA:\n" - " Default is 1.\n" - " 0 -- Error isolation disabled\n" - " 1 -- Error isolation enabled only for DIX Type 0\n" - " 2 -- Error isolation enabled for all Types\n"); + " Enable T10-CRC-DIF Error isolation by HBA" + " Default is 0 - Error isolation disabled, 1 - Enable it"); int ql2xiidmaenable=1; module_param(ql2xiidmaenable, int, S_IRUGO); @@ -913,14 +909,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) "Abort command mbx success.\n"); wait = 1; } - - spin_lock_irqsave(&ha->hardware_lock, flags); qla2x00_sp_compl(ha, sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - /* Did the command return during mailbox execution? */ - if (ret == FAILED && !CMD_SP(cmd)) - ret = SUCCESS; /* Wait for the command to be returned. 
*/ if (wait) { @@ -2262,7 +2251,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->this_id = 255; host->cmd_per_lun = 3; host->unique_id = host->host_no; - if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) + if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; @@ -2389,16 +2378,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) "Detected hba at address=%p.\n", ha); - if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { + if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) { - int prot = 0; base_vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_init, base_vha, 0x00f1, "Registering for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(host, - prot | SHOST_DIF_TYPE1_PROTECTION + SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h index 13b6357c1fa2..062c97bf62f5 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_version.h +++ b/trunk/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.07.07-k" +#define QLA2XXX_VERSION "8.03.07.03-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.c b/trunk/drivers/target/iscsi/iscsi_target_parameters.c index 5b773160200f..497b2e718a76 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.c +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.c @@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules( u8 DataSequenceInOrder = 0; u8 ErrorRecoveryLevel = 0, SessionType = 0; u8 IFMarker = 0, OFMarker = 0; - u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; + u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; u32 FirstBurstLength = 0, MaxBurstLength = 0; struct iscsi_param *param = NULL; diff --git a/trunk/drivers/target/iscsi/iscsi_target_util.c b/trunk/drivers/target/iscsi/iscsi_target_util.c index f00137f377b2..a0d23bc0fc98 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_util.c +++ b/trunk/drivers/target/iscsi/iscsi_target_util.c @@ -874,6 +874,40 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess) spin_unlock_bh(&sess->session_usage_lock); } +/* + * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker + * array counts needed for sync and steering. + */ +static int iscsit_determine_sync_and_steering_counts( + struct iscsi_conn *conn, + struct iscsi_data_count *count) +{ + u32 length = count->data_length; + u32 marker, markint; + + count->sync_and_steering = 1; + + marker = (count->type == ISCSI_RX_DATA) ? + conn->of_marker : conn->if_marker; + markint = (count->type == ISCSI_RX_DATA) ? + (conn->conn_ops->OFMarkInt * 4) : + (conn->conn_ops->IFMarkInt * 4); + count->ss_iov_count = count->iov_count; + + while (length > 0) { + if (length >= marker) { + count->ss_iov_count += 3; + count->ss_marker_count += 2; + + length -= marker; + marker = markint; + } else + length = 0; + } + + return 0; +} + /* * Setup conn->if_marker and conn->of_marker values based upon * the initial marker-less interval. 
(see iSCSI v19 A.2) @@ -1256,7 +1290,7 @@ int iscsit_fe_sendpage_sg( struct kvec iov; u32 tx_hdr_size, data_len; u32 offset = cmd->first_data_sg_off; - int tx_sent, iov_off; + int tx_sent; send_hdr: tx_hdr_size = ISCSI_HDR_LEN; @@ -1276,19 +1310,9 @@ int iscsit_fe_sendpage_sg( } data_len = cmd->tx_size - tx_hdr_size - cmd->padding; - /* - * Set iov_off used by padding and data digest tx_data() calls below - * in order to determine proper offset into cmd->iov_data[] - */ - if (conn->conn_ops->DataDigest) { + if (conn->conn_ops->DataDigest) data_len -= ISCSI_CRC_LEN; - if (cmd->padding) - iov_off = (cmd->iov_data_count - 2); - else - iov_off = (cmd->iov_data_count - 1); - } else { - iov_off = (cmd->iov_data_count - 1); - } + /* * Perform sendpage() for each page in the scatterlist */ @@ -1317,7 +1341,8 @@ int iscsit_fe_sendpage_sg( send_padding: if (cmd->padding) { - struct kvec *iov_p = &cmd->iov_data[iov_off++]; + struct kvec *iov_p = + &cmd->iov_data[cmd->iov_data_count-1]; tx_sent = tx_data(conn, iov_p, 1, cmd->padding); if (cmd->padding != tx_sent) { @@ -1331,7 +1356,8 @@ int iscsit_fe_sendpage_sg( send_datacrc: if (conn->conn_ops->DataDigest) { - struct kvec *iov_d = &cmd->iov_data[iov_off]; + struct kvec *iov_d = + &cmd->iov_data[cmd->iov_data_count]; tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); if (ISCSI_CRC_LEN != tx_sent) { @@ -1405,7 +1431,8 @@ static int iscsit_do_rx_data( struct iscsi_data_count *count) { int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; - struct kvec *iov_p; + u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; + struct kvec iov[count->ss_iov_count], *iov_p; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) @@ -1413,8 +1440,93 @@ static int iscsit_do_rx_data( memset(&msg, 0, sizeof(struct msghdr)); - iov_p = count->iov; - iov_len = count->iov_count; + if (count->sync_and_steering) { + int size = 0; + u32 i, orig_iov_count = 0; + u32 orig_iov_len = 0, orig_iov_loc = 0; + u32 iov_count = 0, per_iov_bytes = 0; + u32 *rx_marker, old_rx_marker = 0; + struct kvec *iov_record; + + memset(&rx_marker_val, 0, + count->ss_marker_count * sizeof(u32)); + memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); + + iov_record = count->iov; + orig_iov_count = count->iov_count; + rx_marker = &conn->of_marker; + + i = 0; + size = data; + orig_iov_len = iov_record[orig_iov_loc].iov_len; + while (size > 0) { + pr_debug("rx_data: #1 orig_iov_len %u," + " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); + pr_debug("rx_data: #2 rx_marker %u, size" + " %u\n", *rx_marker, size); + + if (orig_iov_len >= *rx_marker) { + iov[iov_count].iov_len = *rx_marker; + iov[iov_count++].iov_base = + (iov_record[orig_iov_loc].iov_base + + per_iov_bytes); + + iov[iov_count].iov_len = (MARKER_SIZE / 2); + iov[iov_count++].iov_base = + &rx_marker_val[rx_marker_iov++]; + iov[iov_count].iov_len = (MARKER_SIZE / 2); + iov[iov_count++].iov_base = + &rx_marker_val[rx_marker_iov++]; + old_rx_marker = *rx_marker; + + /* + * OFMarkInt is in 32-bit words. 
+ */ + *rx_marker = (conn->conn_ops->OFMarkInt * 4); + size -= old_rx_marker; + orig_iov_len -= old_rx_marker; + per_iov_bytes += old_rx_marker; + + pr_debug("rx_data: #3 new_rx_marker" + " %u, size %u\n", *rx_marker, size); + } else { + iov[iov_count].iov_len = orig_iov_len; + iov[iov_count++].iov_base = + (iov_record[orig_iov_loc].iov_base + + per_iov_bytes); + + per_iov_bytes = 0; + *rx_marker -= orig_iov_len; + size -= orig_iov_len; + + if (size) + orig_iov_len = + iov_record[++orig_iov_loc].iov_len; + + pr_debug("rx_data: #4 new_rx_marker" + " %u, size %u\n", *rx_marker, size); + } + } + data += (rx_marker_iov * (MARKER_SIZE / 2)); + + iov_p = &iov[0]; + iov_len = iov_count; + + if (iov_count > count->ss_iov_count) { + pr_err("iov_count: %d, count->ss_iov_count:" + " %d\n", iov_count, count->ss_iov_count); + return -1; + } + if (rx_marker_iov > count->ss_marker_count) { + pr_err("rx_marker_iov: %d, count->ss_marker" + "_count: %d\n", rx_marker_iov, + count->ss_marker_count); + return -1; + } + } else { + iov_p = count->iov; + iov_len = count->iov_count; + } while (total_rx < data) { rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, @@ -1429,6 +1541,16 @@ static int iscsit_do_rx_data( rx_loop, total_rx, data); } + if (count->sync_and_steering) { + int j; + for (j = 0; j < rx_marker_iov; j++) { + pr_debug("rx_data: #5 j: %d, offset: %d\n", + j, rx_marker_val[j]); + conn->of_marker_offset = rx_marker_val[j]; + } + total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); + } + return total_rx; } @@ -1437,7 +1559,8 @@ static int iscsit_do_tx_data( struct iscsi_data_count *count) { int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; - struct kvec *iov_p; + u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; + struct kvec iov[count->ss_iov_count], *iov_p; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) @@ -1450,8 +1573,98 @@ static int iscsit_do_tx_data( memset(&msg, 0, sizeof(struct msghdr)); - iov_p = count->iov; - iov_len = count->iov_count; + if (count->sync_and_steering) { + int size = 0; + u32 i, orig_iov_count = 0; + u32 orig_iov_len = 0, orig_iov_loc = 0; + u32 iov_count = 0, per_iov_bytes = 0; + u32 *tx_marker, old_tx_marker = 0; + struct kvec *iov_record; + + memset(&tx_marker_val, 0, + count->ss_marker_count * sizeof(u32)); + memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); + + iov_record = count->iov; + orig_iov_count = count->iov_count; + tx_marker = &conn->if_marker; + + i = 0; + size = data; + orig_iov_len = iov_record[orig_iov_loc].iov_len; + while (size > 0) { + pr_debug("tx_data: #1 orig_iov_len %u," + " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); + pr_debug("tx_data: #2 tx_marker %u, size" + " %u\n", *tx_marker, size); + + if (orig_iov_len >= *tx_marker) { + iov[iov_count].iov_len = *tx_marker; + iov[iov_count++].iov_base = + (iov_record[orig_iov_loc].iov_base + + per_iov_bytes); + + tx_marker_val[tx_marker_iov] = + (size - *tx_marker); + iov[iov_count].iov_len = (MARKER_SIZE / 2); + iov[iov_count++].iov_base = + &tx_marker_val[tx_marker_iov++]; + iov[iov_count].iov_len = (MARKER_SIZE / 2); + iov[iov_count++].iov_base = + &tx_marker_val[tx_marker_iov++]; + old_tx_marker = *tx_marker; + + /* + * IFMarkInt is in 32-bit words. 
+ */ + *tx_marker = (conn->conn_ops->IFMarkInt * 4); + size -= old_tx_marker; + orig_iov_len -= old_tx_marker; + per_iov_bytes += old_tx_marker; + + pr_debug("tx_data: #3 new_tx_marker" + " %u, size %u\n", *tx_marker, size); + pr_debug("tx_data: #4 offset %u\n", + tx_marker_val[tx_marker_iov-1]); + } else { + iov[iov_count].iov_len = orig_iov_len; + iov[iov_count++].iov_base + = (iov_record[orig_iov_loc].iov_base + + per_iov_bytes); + + per_iov_bytes = 0; + *tx_marker -= orig_iov_len; + size -= orig_iov_len; + + if (size) + orig_iov_len = + iov_record[++orig_iov_loc].iov_len; + + pr_debug("tx_data: #5 new_tx_marker" + " %u, size %u\n", *tx_marker, size); + } + } + + data += (tx_marker_iov * (MARKER_SIZE / 2)); + + iov_p = &iov[0]; + iov_len = iov_count; + + if (iov_count > count->ss_iov_count) { + pr_err("iov_count: %d, count->ss_iov_count:" + " %d\n", iov_count, count->ss_iov_count); + return -1; + } + if (tx_marker_iov > count->ss_marker_count) { + pr_err("tx_marker_iov: %d, count->ss_marker" + "_count: %d\n", tx_marker_iov, + count->ss_marker_count); + return -1; + } + } else { + iov_p = count->iov; + iov_len = count->iov_count; + } while (total_tx < data) { tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, @@ -1466,6 +1679,9 @@ static int iscsit_do_tx_data( tx_loop, total_tx, data); } + if (count->sync_and_steering) + total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); + return total_tx; } @@ -1486,6 +1702,12 @@ int rx_data( c.data_length = data; c.type = ISCSI_RX_DATA; + if (conn->conn_ops->OFMarker && + (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { + if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) + return -1; + } + return iscsit_do_rx_data(conn, &c); } @@ -1506,6 +1728,12 @@ int tx_data( c.data_length = data; c.type = ISCSI_TX_DATA; + if (conn->conn_ops->IFMarker && + (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { + if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) + return -1; + } + return iscsit_do_tx_data(conn, &c); } diff --git a/trunk/drivers/target/target_core_cdb.c b/trunk/drivers/target/target_core_cdb.c index f04d4ef99dca..89ae923c5da6 100644 --- a/trunk/drivers/target/target_core_cdb.c +++ b/trunk/drivers/target/target_core_cdb.c @@ -24,7 +24,6 @@ */ #include -#include #include #include @@ -155,37 +154,6 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) return 0; } -static void -target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) -{ - unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; - unsigned char *buf = buf_off; - int cnt = 0, next = 1; - /* - * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on - * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field - * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION - * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL - * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure - * per device uniqeness. - */ - while (*p != '\0') { - if (cnt >= 13) - break; - if (!isxdigit(*p)) { - p++; - continue; - } - if (next != 0) { - buf[cnt++] |= hex_to_bin(*p++); - next = 0; - } else { - buf[cnt] = hex_to_bin(*p++) << 4; - next = 1; - } - } -} - /* * Device identification VPD, for a complete list of * DESIGNATOR TYPEs see spc4r17 Table 459. 
@@ -251,7 +219,8 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ - target_parse_naa_6h_vendor_specific(dev, &buf[off]); + buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); + hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); len = 20; off = (len + 4); diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index a4b0a8d27f25..8d0c58ea6316 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -977,17 +977,15 @@ static void target_qf_do_work(struct work_struct *work) { struct se_device *dev = container_of(work, struct se_device, qf_work_queue); - LIST_HEAD(qf_cmd_list); struct se_cmd *cmd, *cmd_tmp; spin_lock_irq(&dev->qf_cmd_lock); - list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); - spin_unlock_irq(&dev->qf_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { - list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec(&dev->dev_qf_count); smp_mb__after_atomic_dec(); + spin_unlock_irq(&dev->qf_cmd_lock); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, @@ -999,7 +997,10 @@ static void target_qf_do_work(struct work_struct *work) * has been added to head of queue */ transport_add_cmd_to_queue(cmd, cmd->t_state); + + spin_lock_irq(&dev->qf_cmd_lock); } + spin_unlock_irq(&dev->qf_cmd_lock); } unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) diff --git a/trunk/drivers/target/tcm_fc/tcm_fc.h b/trunk/drivers/target/tcm_fc/tcm_fc.h index 3749d8b4b423..bd4fe21a23b8 100644 --- a/trunk/drivers/target/tcm_fc/tcm_fc.h +++ b/trunk/drivers/target/tcm_fc/tcm_fc.h @@ -98,7 +98,8 @@ struct ft_tpg { struct list_head list; /* linkage in ft_lport_acl tpg_list */ struct list_head lun_list; /* head of LUNs */ struct se_portal_group se_tpg; - struct workqueue_struct *workqueue; + struct task_struct *thread; /* processing thread */ + struct se_queue_obj qobj; /* queue for processing thread */ }; struct ft_lport_acl { @@ -109,10 +110,16 @@ struct ft_lport_acl { struct se_wwn fc_lport_wwn; }; +enum ft_cmd_state { + FC_CMD_ST_NEW = 0, + FC_CMD_ST_REJ +}; + /* * Commands */ struct ft_cmd { + enum ft_cmd_state state; u32 lun; /* LUN from request */ struct ft_sess *sess; /* session held for cmd */ struct fc_seq *seq; /* sequence in exchange mgr */ @@ -120,7 +127,7 @@ struct ft_cmd { struct fc_frame *req_frame; unsigned char *cdb; /* pointer to CDB inside frame */ u32 write_data_len; /* data received on writes */ - struct work_struct work; + struct se_queue_req se_req; /* Local sense buffer */ unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; u32 was_ddp_setup:1; /* Set only if ddp is setup */ @@ -170,6 +177,7 @@ int ft_is_state_remove(struct se_cmd *); /* * other internal functions. 
*/ +int ft_thread(void *); void ft_recv_req(struct ft_sess *, struct fc_frame *); struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); diff --git a/trunk/drivers/target/tcm_fc/tfc_cmd.c b/trunk/drivers/target/tcm_fc/tfc_cmd.c index 80fbcde00cb6..5654dc22f7ae 100644 --- a/trunk/drivers/target/tcm_fc/tfc_cmd.c +++ b/trunk/drivers/target/tcm_fc/tfc_cmd.c @@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) int count; se_cmd = &cmd->se_cmd; - pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", - caller, cmd, cmd->sess, cmd->seq, se_cmd); + pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", + caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); pr_debug("%s: cmd %p cdb %p\n", caller, cmd, cmd->cdb); pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); @@ -90,6 +90,38 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); } +static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) +{ + struct ft_tpg *tpg = sess->tport->tpg; + struct se_queue_obj *qobj = &tpg->qobj; + unsigned long flags; + + qobj = &sess->tport->tpg->qobj; + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); + atomic_inc(&qobj->queue_cnt); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + wake_up_process(tpg->thread); +} + +static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) +{ + unsigned long flags; + struct se_queue_req *qr; + + spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + if (list_empty(&qobj->qobj_list)) { + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return NULL; + } + qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); + list_del(&qr->qr_list); + atomic_dec(&qobj->queue_cnt); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + return container_of(qr, struct ft_cmd, se_req); +} + static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; @@ -250,7 +282,9 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd) int ft_get_cmd_state(struct se_cmd *se_cmd) { - return 0; + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); + + return cmd->state; } int ft_is_state_remove(struct se_cmd *se_cmd) @@ -471,8 +505,6 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) return 0; } -static void ft_send_work(struct work_struct *work); - /* * Handle incoming FCP command. */ @@ -491,9 +523,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ - - INIT_WORK(&cmd->work, ft_send_work); - queue_work(sess->tport->tpg->workqueue, &cmd->work); + ft_queue_cmd(sess, cmd); return; busy: @@ -533,13 +563,12 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) /* * Send new command to target. */ -static void ft_send_work(struct work_struct *work) +static void ft_send_cmd(struct ft_cmd *cmd) { - struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); struct se_cmd *se_cmd; struct fcp_cmnd *fcp; - int data_dir = 0; + int data_dir; u32 data_len; int task_attr; int ret; @@ -646,3 +675,42 @@ static void ft_send_work(struct work_struct *work) err: ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); } + +/* + * Handle request in the command thread. 
+ */ +static void ft_exec_req(struct ft_cmd *cmd) +{ + pr_debug("cmd state %x\n", cmd->state); + switch (cmd->state) { + case FC_CMD_ST_NEW: + ft_send_cmd(cmd); + break; + default: + break; + } +} + +/* + * Processing thread. + * Currently one thread per tpg. + */ +int ft_thread(void *arg) +{ + struct ft_tpg *tpg = arg; + struct se_queue_obj *qobj = &tpg->qobj; + struct ft_cmd *cmd; + + while (!kthread_should_stop()) { + schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); + if (kthread_should_stop()) + goto out; + + cmd = ft_dequeue_cmd(qobj); + if (cmd) + ft_exec_req(cmd); + } + +out: + return 0; +} diff --git a/trunk/drivers/target/tcm_fc/tfc_conf.c b/trunk/drivers/target/tcm_fc/tfc_conf.c index 8fa39b74f22c..b15879d43e22 100644 --- a/trunk/drivers/target/tcm_fc/tfc_conf.c +++ b/trunk/drivers/target/tcm_fc/tfc_conf.c @@ -327,6 +327,7 @@ static struct se_portal_group *ft_add_tpg( tpg->index = index; tpg->lport_acl = lacl; INIT_LIST_HEAD(&tpg->lun_list); + transport_init_queue_obj(&tpg->qobj); ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -335,8 +336,8 @@ static struct se_portal_group *ft_add_tpg( return NULL; } - tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); - if (!tpg->workqueue) { + tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); + if (IS_ERR(tpg->thread)) { kfree(tpg); return NULL; } @@ -355,7 +356,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) pr_debug("del tpg %s\n", config_item_name(&tpg->se_tpg.tpg_group.cg_item)); - destroy_workqueue(tpg->workqueue); + kthread_stop(tpg->thread); /* Wait for sessions to be freed thru RCU, for BUG_ON below */ synchronize_rcu(); diff --git a/trunk/drivers/target/tcm_fc/tfc_io.c b/trunk/drivers/target/tcm_fc/tfc_io.c index d35ea5a3d56c..c37f4cd96452 100644 --- a/trunk/drivers/target/tcm_fc/tfc_io.c +++ b/trunk/drivers/target/tcm_fc/tfc_io.c @@ -219,41 +219,43 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) if (cmd->was_ddp_setup) { BUG_ON(!ep); BUG_ON(!lport); - /* - * Since DDP (Large Rx offload) was setup for this request, - * payload is expected to be copied directly to user buffers. - */ - buf = fc_frame_payload_get(fp, 1); - if (buf) - pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " + } + + /* + * Doesn't expect payload if DDP is setup. Payload + * is expected to be copied directly to user buffers + * due to DDP (Large Rx offload), + */ + buf = fc_frame_payload_get(fp, 1); + if (buf) + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " "cmd->sg_cnt 0x%x. DDP was setup" " hence not expected to receive frame with " - "payload, Frame will be dropped if" - "'Sequence Initiative' bit in f_ctl is" + "payload, Frame will be dropped if " + "'Sequence Initiative' bit in f_ctl is " "not set\n", __func__, ep->xid, f_ctl, cmd->sg, cmd->sg_cnt); - /* - * Invalidate HW DDP context if it was setup for respective - * command. Invalidation of HW DDP context is requited in both - * situation (success and error). - */ - ft_invl_hw_context(cmd); + /* + * Invalidate HW DDP context if it was setup for respective + * command. Invalidation of HW DDP context is requited in both + * situation (success and error). + */ + ft_invl_hw_context(cmd); - /* - * If "Sequence Initiative (TSI)" bit set in f_ctl, means last - * write data frame is received successfully where payload is - * posted directly to user buffer and only the last frame's - * header is posted in receive queue. - * - * If "Sequence Initiative (TSI)" bit is not set, means error - * condition w.r.t. 
DDP, hence drop the packet and let explict - * ABORTS from other end of exchange timer trigger the recovery. - */ - if (f_ctl & FC_FC_SEQ_INIT) - goto last_frame; - else - goto drop; - } + /* + * If "Sequence Initiative (TSI)" bit set in f_ctl, means last + * write data frame is received successfully where payload is + * posted directly to user buffer and only the last frame's + * header is posted in receive queue. + * + * If "Sequence Initiative (TSI)" bit is not set, means error + * condition w.r.t. DDP, hence drop the packet and let explict + * ABORTS from other end of exchange timer trigger the recovery. + */ + if (f_ctl & FC_FC_SEQ_INIT) + goto last_frame; + else + goto drop; rel_off = ntohl(fh->fh_parm_offset); frame_len = fr_len(fp); diff --git a/trunk/drivers/tty/serial/crisv10.c b/trunk/drivers/tty/serial/crisv10.c index 58be715913cd..225123b37f19 100644 --- a/trunk/drivers/tty/serial/crisv10.c +++ b/trunk/drivers/tty/serial/crisv10.c @@ -4450,7 +4450,7 @@ static int __init rs_init(void) #if defined(CONFIG_ETRAX_RS485) #if defined(CONFIG_ETRAX_RS485_ON_PA) - if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit, + if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, rs485_pa_bit)) { printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " "RS485 pin\n"); @@ -4459,7 +4459,7 @@ static int __init rs_init(void) } #endif #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) - if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit, + if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, rs485_port_g_bit)) { printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " "RS485 pin\n"); diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c index 7523719bf8a4..da70f5c32eb9 100644 --- a/trunk/drivers/xen/events.c +++ b/trunk/drivers/xen/events.c @@ -54,7 +54,7 @@ * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. 
*/ -static DEFINE_MUTEX(irq_mapping_update_lock); +static DEFINE_SPINLOCK(irq_mapping_update_lock); static LIST_HEAD(xen_irq_list_head); @@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, int irq = -1; struct physdev_irq irq_op; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); irq = find_irq_by_gsi(gsi); if (irq != -1) { @@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, handle_edge_irq, name); out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; } @@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, { int irq, ret; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); irq = xen_allocate_irq_dynamic(); if (irq == -1) @@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, if (ret < 0) goto error_irq; out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; error_irq: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); xen_free_irq(irq); return -1; } @@ -740,7 +740,7 @@ int xen_destroy_irq(int irq) struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); desc = irq_to_desc(irq); if (!desc) @@ -766,7 +766,7 @@ int xen_destroy_irq(int irq) xen_free_irq(irq); out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return rc; } @@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq) struct irq_info *info; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info == NULL || info->type != IRQT_PIRQ) @@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq) } irq = -1; out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; } @@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) { int irq; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); irq = evtchn_to_irq[evtchn]; @@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) } out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; } @@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) struct evtchn_bind_ipi bind_ipi; int evtchn, irq; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; @@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) } out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; } @@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) struct evtchn_bind_virq bind_virq; int evtchn, irq; - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; @@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) } out: - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); return irq; } @@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq) struct evtchn_close close; int evtchn = evtchn_from_irq(irq); - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); if (VALID_EVTCHN(evtchn)) { close.port = evtchn; @@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq) xen_free_irq(irq); - 
mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); } int bind_evtchn_to_irqhandler(unsigned int evtchn, @@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq) will also be masked. */ disable_irq(irq); - mutex_lock(&irq_mapping_update_lock); + spin_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(evtchn_to_irq[evtchn] != -1); @@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq) xen_irq_info_evtchn_init(irq, evtchn); - mutex_unlock(&irq_mapping_update_lock); + spin_unlock(&irq_mapping_update_lock); /* new event channels are always bound to cpu 0 */ irq_set_affinity(irq, cpumask_of(0)); diff --git a/trunk/fs/hfsplus/super.c b/trunk/fs/hfsplus/super.c index d24a9b666a23..c106ca22e812 100644 --- a/trunk/fs/hfsplus/super.c +++ b/trunk/fs/hfsplus/super.c @@ -344,7 +344,6 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) struct inode *root, *inode; struct qstr str; struct nls_table *nls = NULL; - u64 last_fs_block, last_fs_page; int err; err = -EINVAL; @@ -400,13 +399,9 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (!sbi->rsrc_clump_blocks) sbi->rsrc_clump_blocks = 1; - err = -EFBIG; - last_fs_block = sbi->total_blocks - 1; - last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> - PAGE_CACHE_SHIFT; - - if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || - (last_fs_page > (pgoff_t)(~0ULL))) { + err = generic_check_addressable(sbi->alloc_blksz_shift, + sbi->total_blocks); + if (err) { printk(KERN_ERR "hfs: filesystem size too large.\n"); goto out_free_vhdr; } @@ -530,8 +525,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) out_close_ext_tree: hfs_btree_close(sbi->ext_tree); out_free_vhdr: - kfree(sbi->s_vhdr_buf); - kfree(sbi->s_backup_vhdr_buf); + kfree(sbi->s_vhdr); + kfree(sbi->s_backup_vhdr); out_unload_nls: unload_nls(sbi->nls); unload_nls(nls); diff --git a/trunk/fs/hfsplus/wrapper.c b/trunk/fs/hfsplus/wrapper.c index 7daf4b852d1c..10e515a0d452 100644 --- a/trunk/fs/hfsplus/wrapper.c +++ b/trunk/fs/hfsplus/wrapper.c @@ -272,9 +272,9 @@ int hfsplus_read_wrapper(struct super_block *sb) return 0; out_free_backup_vhdr: - kfree(sbi->s_backup_vhdr_buf); + kfree(sbi->s_backup_vhdr); out_free_vhdr: - kfree(sbi->s_vhdr_buf); + kfree(sbi->s_vhdr); out: return error; } diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index f4788365ea22..b52bc685465f 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -2616,7 +2616,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) if (!dir->i_op->rmdir) return -EPERM; - dget(dentry); mutex_lock(&dentry->d_inode->i_mutex); error = -EBUSY; @@ -2637,7 +2636,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) out: mutex_unlock(&dentry->d_inode->i_mutex); - dput(dentry); if (!error) d_delete(dentry); return error; @@ -3027,7 +3025,6 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; - dget(new_dentry); if (target) mutex_lock(&target->i_mutex); @@ -3048,7 +3045,6 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, out: if (target) mutex_unlock(&target->i_mutex); - dput(new_dentry); if (!error) if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry,new_dentry); diff --git a/trunk/fs/nfs/nfs4_fs.h b/trunk/fs/nfs/nfs4_fs.h index 3e93e9a1bee1..1ec1a85fa71c 100644 --- a/trunk/fs/nfs/nfs4_fs.h +++ 
b/trunk/fs/nfs/nfs4_fs.h @@ -56,9 +56,6 @@ enum nfs4_session_state { NFS4_SESSION_DRAINING, }; -#define NFS4_RENEW_TIMEOUT 0x01 -#define NFS4_RENEW_DELEGATION_CB 0x02 - struct nfs4_minor_version_ops { u32 minor_version; @@ -228,7 +225,7 @@ struct nfs4_state_recovery_ops { }; struct nfs4_state_maintenance_ops { - int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned); + int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *); int (*renew_lease)(struct nfs_client *, struct rpc_cred *); }; @@ -240,6 +237,8 @@ extern const struct inode_operations nfs4_dir_inode_operations; extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); +extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); +extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); @@ -350,7 +349,6 @@ extern void nfs4_close_sync(struct nfs4_state *, fmode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_lease_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); -extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs41_handle_recall_slot(struct nfs_client *clp); diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index 4700fae1ada0..8c77039e7a81 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -3374,13 +3374,9 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata) if (task->tk_status < 0) { /* Unless we're shutting down, schedule state recovery! 
*/ - if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) - return; - if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { + if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) nfs4_schedule_lease_recovery(clp); - return; - } - nfs4_schedule_path_down_recovery(clp); + return; } do_renew_lease(clp, timestamp); } @@ -3390,7 +3386,7 @@ static const struct rpc_call_ops nfs4_renew_ops = { .rpc_release = nfs4_renew_release, }; -static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) +int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -3399,11 +3395,9 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, }; struct nfs4_renewdata *data; - if (renew_flags == 0) - return 0; if (!atomic_inc_not_zero(&clp->cl_count)) return -EIO; - data = kmalloc(sizeof(*data), GFP_NOFS); + data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->client = clp; @@ -3412,7 +3406,7 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, &nfs4_renew_ops, data); } -static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) +int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], @@ -5510,13 +5504,11 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ return rpc_run_task(&task_setup_data); } -static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) +static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) { struct rpc_task *task; int ret = 0; - if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) - return 0; task = _nfs41_proc_sequence(clp, cred); if (IS_ERR(task)) ret = PTR_ERR(task); diff --git a/trunk/fs/nfs/nfs4renewd.c b/trunk/fs/nfs/nfs4renewd.c index dc484c0eae7f..df8e7f3ca56d 100644 --- a/trunk/fs/nfs/nfs4renewd.c +++ b/trunk/fs/nfs/nfs4renewd.c @@ -60,7 +60,6 @@ nfs4_renew_state(struct work_struct *work) struct rpc_cred *cred; long lease; unsigned long last, now; - unsigned renew_flags = 0; ops = clp->cl_mvops->state_renewal_ops; dprintk("%s: start\n", __func__); @@ -73,23 +72,18 @@ nfs4_renew_state(struct work_struct *work) last = clp->cl_last_renewal; now = jiffies; /* Are we close to a lease timeout? */ - if (time_after(now, last + lease/3)) - renew_flags |= NFS4_RENEW_TIMEOUT; - if (nfs_delegations_present(clp)) - renew_flags |= NFS4_RENEW_DELEGATION_CB; - - if (renew_flags != 0) { + if (time_after(now, last + lease/3)) { cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) { - if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) { + if (!nfs_delegations_present(clp)) { set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); goto out; } nfs_expire_all_delegations(clp); } else { /* Queue an asynchronous RENEW. 
*/ - ops->sched_state_renewal(clp, cred, renew_flags); + ops->sched_state_renewal(clp, cred); put_rpccred(cred); goto out_exp; } diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c index 39914be40b03..72ab97ef3d61 100644 --- a/trunk/fs/nfs/nfs4state.c +++ b/trunk/fs/nfs/nfs4state.c @@ -1038,12 +1038,6 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } -void nfs4_schedule_path_down_recovery(struct nfs_client *clp) -{ - nfs_handle_cb_pathdown(clp); - nfs4_schedule_state_manager(clp); -} - static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index 9b7dd7013b15..b961ceac66b4 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -2035,6 +2035,9 @@ static inline void nfs_initialise_sb(struct super_block *sb) sb->s_blocksize = nfs_block_bits(server->wsize, &sb->s_blocksize_bits); + if (server->flags & NFS_MOUNT_NOAC) + sb->s_flags |= MS_SYNCHRONOUS; + sb->s_bdi = &server->backing_dev_info; nfs_super_set_maxbytes(sb, server->maxfilesize); @@ -2246,10 +2249,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type, if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; - /* -o noac implies -o sync */ - if (server->flags & NFS_MOUNT_NOAC) - sb_mntdata.mntflags |= MS_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2362,10 +2361,6 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; - /* -o noac implies -o sync */ - if (server->flags & NFS_MOUNT_NOAC) - sb_mntdata.mntflags |= MS_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2633,10 +2628,6 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; - /* -o noac implies -o sync */ - if (server->flags & NFS_MOUNT_NOAC) - sb_mntdata.mntflags |= MS_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -2925,10 +2916,6 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; - /* -o noac implies -o sync */ - if (server->flags & NFS_MOUNT_NOAC) - sb_mntdata.mntflags |= MS_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { @@ -3016,10 +3003,6 @@ nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags, if (server->flags & NFS4_MOUNT_UNSHARED) compare_super = NULL; - /* -o noac implies -o sync */ - if (server->flags & NFS_MOUNT_NOAC) - sb_mntdata.mntflags |= MS_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); if (IS_ERR(s)) { diff --git a/trunk/fs/nfs/write.c b/trunk/fs/nfs/write.c index c9bd2a6b7d4b..b39b37f80913 100644 --- a/trunk/fs/nfs/write.c +++ b/trunk/fs/nfs/write.c @@ -958,7 +958,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head if (!data) goto out_bad; data->pagevec[0] = page; 
- nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags); + nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); list_add(&data->list, res); requests++; nbytes -= len; diff --git a/trunk/fs/xfs/xfs_aops.c b/trunk/fs/xfs/xfs_aops.c index 8c37dde4c521..63e971e2b837 100644 --- a/trunk/fs/xfs/xfs_aops.c +++ b/trunk/fs/xfs/xfs_aops.c @@ -1300,7 +1300,6 @@ xfs_end_io_direct_write( bool is_async) { struct xfs_ioend *ioend = iocb->private; - struct inode *inode = ioend->io_inode; /* * blockdev_direct_IO can return an error even after the I/O @@ -1332,7 +1331,7 @@ xfs_end_io_direct_write( } /* XXX: probably should move into the real I/O completion handler */ - inode_dio_done(inode); + inode_dio_done(ioend->io_inode); } STATIC ssize_t diff --git a/trunk/include/linux/basic_mmio_gpio.h b/trunk/include/linux/basic_mmio_gpio.h index feb912196745..98999cf107ce 100644 --- a/trunk/include/linux/basic_mmio_gpio.h +++ b/trunk/include/linux/basic_mmio_gpio.h @@ -63,10 +63,15 @@ static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) return container_of(gc, struct bgpio_chip, gc); } -int bgpio_remove(struct bgpio_chip *bgc); -int bgpio_init(struct bgpio_chip *bgc, struct device *dev, - unsigned long sz, void __iomem *dat, void __iomem *set, - void __iomem *clr, void __iomem *dirout, void __iomem *dirin, - bool big_endian); +int __devexit bgpio_remove(struct bgpio_chip *bgc); +int __devinit bgpio_init(struct bgpio_chip *bgc, + struct device *dev, + unsigned long sz, + void __iomem *dat, + void __iomem *set, + void __iomem *clr, + void __iomem *dirout, + void __iomem *dirin, + bool big_endian); #endif /* __BASIC_MMIO_GPIO_H */ diff --git a/trunk/include/linux/memcontrol.h b/trunk/include/linux/memcontrol.h index 343bd7661f2a..3b535db00a94 100644 --- a/trunk/include/linux/memcontrol.h +++ b/trunk/include/linux/memcontrol.h @@ -39,6 +39,16 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct mem_cgroup *mem_cont, int active, int file); +struct memcg_scanrecord { + struct mem_cgroup *mem; /* scanend memory cgroup */ + struct mem_cgroup *root; /* scan target hierarchy root */ + int context; /* scanning context (see memcontrol.c) */ + unsigned long nr_scanned[2]; /* the number of scanned pages */ + unsigned long nr_rotated[2]; /* the number of rotated pages */ + unsigned long nr_freed[2]; /* the number of freed pages */ + unsigned long elapsed; /* nsec of time elapsed while scanning */ +}; + #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* * All "charge" functions with gfp_mask should use GFP_KERNEL or @@ -117,6 +127,15 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page); extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap, + struct memcg_scanrecord *rec); +extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, + gfp_t gfp_mask, bool noswap, + struct zone *zone, + struct memcg_scanrecord *rec, + unsigned long *nr_scanned); + #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; #endif diff --git a/trunk/include/linux/swap.h b/trunk/include/linux/swap.h index c71f84bb62ec..14d62490922e 100644 --- a/trunk/include/linux/swap.h +++ b/trunk/include/linux/swap.h @@ -252,12 +252,6 @@ static inline void lru_cache_add_file(struct page *page) extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, 
int mode, int file); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); -extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap, - struct zone *zone, - unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 1783aabc6128..25fb1b0e53fa 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -2412,13 +2412,8 @@ void drain_workqueue(struct workqueue_struct *wq) for_each_cwq_cpu(cpu, wq) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - bool drained; - spin_lock_irq(&cwq->gcwq->lock); - drained = !cwq->nr_active && list_empty(&cwq->delayed_works); - spin_unlock_irq(&cwq->gcwq->lock); - - if (drained) + if (!cwq->nr_active && list_empty(&cwq->delayed_works)) continue; if (++flush_cnt == 10 || diff --git a/trunk/lib/sha1.c b/trunk/lib/sha1.c index 1de509a159c8..f33271dd00cb 100644 --- a/trunk/lib/sha1.c +++ b/trunk/lib/sha1.c @@ -8,7 +8,6 @@ #include #include #include -#include #include /* diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index 7771871fa353..645a080ba4df 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -827,14 +827,13 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, { unsigned int i; unsigned int ret; - unsigned int nr_found, nr_skip; + unsigned int nr_found; rcu_read_lock(); restart: nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, (void ***)pages, NULL, start, nr_pages); ret = 0; - nr_skip = 0; for (i = 0; i < nr_found; i++) { struct page *page; repeat: @@ -857,7 +856,6 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, * here as an exceptional entry: so skip over it - * we only reach this from invalidate_mapping_pages(). */ - nr_skip++; continue; } @@ -878,7 +876,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, * If all entries were removed before we could secure them, * try again, because callers stop trying once 0 is returned. */ - if (unlikely(!ret && nr_found > nr_skip)) + if (unlikely(!ret && nr_found)) goto restart; rcu_read_unlock(); return ret; diff --git a/trunk/mm/memcontrol.c b/trunk/mm/memcontrol.c index 3508777837c7..ebd1e86bef1c 100644 --- a/trunk/mm/memcontrol.c +++ b/trunk/mm/memcontrol.c @@ -204,6 +204,50 @@ struct mem_cgroup_eventfd_list { static void mem_cgroup_threshold(struct mem_cgroup *mem); static void mem_cgroup_oom_notify(struct mem_cgroup *mem); +enum { + SCAN_BY_LIMIT, + SCAN_BY_SYSTEM, + NR_SCAN_CONTEXT, + SCAN_BY_SHRINK, /* not recorded now */ +}; + +enum { + SCAN, + SCAN_ANON, + SCAN_FILE, + ROTATE, + ROTATE_ANON, + ROTATE_FILE, + FREED, + FREED_ANON, + FREED_FILE, + ELAPSED, + NR_SCANSTATS, +}; + +struct scanstat { + spinlock_t lock; + unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; + unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; +}; + +const char *scanstat_string[NR_SCANSTATS] = { + "scanned_pages", + "scanned_anon_pages", + "scanned_file_pages", + "rotated_pages", + "rotated_anon_pages", + "rotated_file_pages", + "freed_pages", + "freed_anon_pages", + "freed_file_pages", + "elapsed_ns", +}; +#define SCANSTAT_WORD_LIMIT "_by_limit" +#define SCANSTAT_WORD_SYSTEM "_by_system" +#define SCANSTAT_WORD_HIERARCHY "_under_hierarchy" + + /* * The memory controller data structure. 
The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -269,7 +313,8 @@ struct mem_cgroup { /* For oom notifier event fd */ struct list_head oom_notify; - + /* For recording LRU-scan statistics */ + struct scanstat scanstat; /* * Should we move charges of a task when a task is moved into this * mem_cgroup ? And what type of charges should we move ? @@ -1633,6 +1678,44 @@ bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap) } #endif +static void __mem_cgroup_record_scanstat(unsigned long *stats, + struct memcg_scanrecord *rec) +{ + + stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; + stats[SCAN_ANON] += rec->nr_scanned[0]; + stats[SCAN_FILE] += rec->nr_scanned[1]; + + stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; + stats[ROTATE_ANON] += rec->nr_rotated[0]; + stats[ROTATE_FILE] += rec->nr_rotated[1]; + + stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; + stats[FREED_ANON] += rec->nr_freed[0]; + stats[FREED_FILE] += rec->nr_freed[1]; + + stats[ELAPSED] += rec->elapsed; +} + +static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) +{ + struct mem_cgroup *mem; + int context = rec->context; + + if (context >= NR_SCAN_CONTEXT) + return; + + mem = rec->mem; + spin_lock(&mem->scanstat.lock); + __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); + spin_unlock(&mem->scanstat.lock); + + mem = rec->root; + spin_lock(&mem->scanstat.lock); + __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); + spin_unlock(&mem->scanstat.lock); +} + /* * Scan the hierarchy if needed to reclaim memory. We remember the last child * we reclaimed from, so that we don't end up penalizing one child extensively @@ -1657,8 +1740,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; + struct memcg_scanrecord rec; unsigned long excess; - unsigned long nr_scanned; + unsigned long scanned; excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; @@ -1666,6 +1750,15 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, if (!check_soft && !shrink && root_mem->memsw_is_minimum) noswap = true; + if (shrink) + rec.context = SCAN_BY_SHRINK; + else if (check_soft) + rec.context = SCAN_BY_SYSTEM; + else + rec.context = SCAN_BY_LIMIT; + + rec.root = root_mem; + while (1) { victim = mem_cgroup_select_victim(root_mem); if (victim == root_mem) { @@ -1706,14 +1799,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, css_put(&victim->css); continue; } + rec.mem = victim; + rec.nr_scanned[0] = 0; + rec.nr_scanned[1] = 0; + rec.nr_rotated[0] = 0; + rec.nr_rotated[1] = 0; + rec.nr_freed[0] = 0; + rec.nr_freed[1] = 0; + rec.elapsed = 0; /* we use swappiness of local cgroup */ if (check_soft) { ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, - noswap, zone, &nr_scanned); - *total_scanned += nr_scanned; + noswap, zone, &rec, &scanned); + *total_scanned += scanned; } else ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, - noswap); + noswap, &rec); + mem_cgroup_record_scanstat(&rec); css_put(&victim->css); /* * At shrinking usage, we can't check we should stop here or @@ -3752,14 +3854,18 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) /* try to free all pages in this cgroup */ shrink = 1; while (nr_retries && mem->res.usage > 0) { + 
struct memcg_scanrecord rec; int progress; if (signal_pending(current)) { ret = -EINTR; goto out; } + rec.context = SCAN_BY_SHRINK; + rec.mem = mem; + rec.root = mem; progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, - false); + false, &rec); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -4603,6 +4709,54 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file) } #endif /* CONFIG_NUMA */ +static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, + struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); + char string[64]; + int i; + + for (i = 0; i < NR_SCANSTATS; i++) { + strcpy(string, scanstat_string[i]); + strcat(string, SCANSTAT_WORD_LIMIT); + cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]); + } + + for (i = 0; i < NR_SCANSTATS; i++) { + strcpy(string, scanstat_string[i]); + strcat(string, SCANSTAT_WORD_SYSTEM); + cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]); + } + + for (i = 0; i < NR_SCANSTATS; i++) { + strcpy(string, scanstat_string[i]); + strcat(string, SCANSTAT_WORD_LIMIT); + strcat(string, SCANSTAT_WORD_HIERARCHY); + cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); + } + for (i = 0; i < NR_SCANSTATS; i++) { + strcpy(string, scanstat_string[i]); + strcat(string, SCANSTAT_WORD_SYSTEM); + strcat(string, SCANSTAT_WORD_HIERARCHY); + cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); + } + return 0; +} + +static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, + unsigned int event) +{ + struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); + + spin_lock(&mem->scanstat.lock); + memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); + memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); + spin_unlock(&mem->scanstat.lock); + return 0; +} + + static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", @@ -4673,6 +4827,11 @@ static struct cftype mem_cgroup_files[] = { .mode = S_IRUGO, }, #endif + { + .name = "vmscan_stat", + .read_map = mem_cgroup_vmscan_stat_read, + .trigger = mem_cgroup_reset_vmscan_stat, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -4936,6 +5095,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) atomic_set(&mem->refcnt, 1); mem->move_charge_at_immigrate = 0; mutex_init(&mem->thresholds_lock); + spin_lock_init(&mem->scanstat.lock); return &mem->css; free_out: __mem_cgroup_free(mem); diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index 9c51f9f58cac..8b57173c1dd5 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; + pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; @@ -648,9 +649,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); + pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, - vma->anon_vma, vma->vm_file, vma->vm_pgoff, - new_pol); + vma->anon_vma, vma->vm_file, pgoff, new_pol); if (prev) { vma = prev; next = vma->vm_next; @@ -1411,9 +1412,7 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy, err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); if (!err && nmask) { - unsigned long copy_size; - copy_size = min_t(unsigned long, sizeof(bm), alloc_size); - err = copy_from_user(bm, nm, 
copy_size); + err = copy_from_user(bm, nm, alloc_size); /* ensure entire bitmap is zeroed */ err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); err |= compat_put_bitmap(nmask, bm, nr_bits); diff --git a/trunk/mm/vmalloc.c b/trunk/mm/vmalloc.c index 5016f19e1661..7ef0903058ee 100644 --- a/trunk/mm/vmalloc.c +++ b/trunk/mm/vmalloc.c @@ -2140,14 +2140,6 @@ struct vm_struct *alloc_vm_area(size_t size) return NULL; } - /* - * If the allocated address space is passed to a hypercall - * before being used then we cannot rely on a page fault to - * trigger an update of the page tables. So sync all the page - * tables here. - */ - vmalloc_sync_all(); - return area; } EXPORT_SYMBOL_GPL(alloc_vm_area); diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index b55699cd9067..b7719ec10dc5 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -105,6 +105,7 @@ struct scan_control { /* Which cgroup do we reclaim from */ struct mem_cgroup *mem_cgroup; + struct memcg_scanrecord *memcg_record; /* * Nodemask of nodes allowed by the caller. If NULL, all nodes @@ -1348,6 +1349,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc, int file = is_file_lru(lru); int numpages = hpage_nr_pages(page); reclaim_stat->recent_rotated[file] += numpages; + if (!scanning_global_lru(sc)) + sc->memcg_record->nr_rotated[file] += numpages; } if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); @@ -1391,6 +1394,10 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone, reclaim_stat->recent_scanned[0] += *nr_anon; reclaim_stat->recent_scanned[1] += *nr_file; + if (!scanning_global_lru(sc)) { + sc->memcg_record->nr_scanned[0] += *nr_anon; + sc->memcg_record->nr_scanned[1] += *nr_file; + } } /* @@ -1504,6 +1511,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, nr_reclaimed += shrink_page_list(&page_list, zone, sc); } + if (!scanning_global_lru(sc)) + sc->memcg_record->nr_freed[file] += nr_reclaimed; + local_irq_disable(); if (current_is_kswapd()) __count_vm_events(KSWAPD_STEAL, nr_reclaimed); @@ -1603,6 +1613,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, } reclaim_stat->recent_scanned[file] += nr_taken; + if (!scanning_global_lru(sc)) + sc->memcg_record->nr_scanned[file] += nr_taken; __count_zone_vm_events(PGREFILL, zone, pgscanned); if (file) @@ -1654,6 +1666,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * get_scan_ratio. 
*/ reclaim_stat->recent_rotated[file] += nr_rotated; + if (!scanning_global_lru(sc)) + sc->memcg_record->nr_rotated[file] += nr_rotated; move_active_pages_to_lru(zone, &l_active, LRU_ACTIVE + file * LRU_FILE); @@ -1794,15 +1808,23 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, u64 fraction[2], denominator; enum lru_list l; int noswap = 0; - bool force_scan = false; + int force_scan = 0; unsigned long nr_force_scan[2]; - /* kswapd does zone balancing and needs to scan this zone */ - if (scanning_global_lru(sc) && current_is_kswapd()) - force_scan = true; - /* memcg may have small limit and need to avoid priority drop */ - if (!scanning_global_lru(sc)) - force_scan = true; + + anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); + file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); + + if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { + /* kswapd does zone balancing and need to scan this zone */ + if (scanning_global_lru(sc) && current_is_kswapd()) + force_scan = 1; + /* memcg may have small limit and need to avoid priority drop */ + if (!scanning_global_lru(sc)) + force_scan = 1; + } /* If we have no swap space, do not bother scanning anon pages. */ if (!sc->may_swap || (nr_swap_pages <= 0)) { @@ -1815,11 +1837,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, goto out; } - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); - file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); - if (scanning_global_lru(sc)) { free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, @@ -2251,9 +2268,10 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap, - struct zone *zone, - unsigned long *nr_scanned) + gfp_t gfp_mask, bool noswap, + struct zone *zone, + struct memcg_scanrecord *rec, + unsigned long *scanned) { struct scan_control sc = { .nr_scanned = 0, @@ -2263,7 +2281,9 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, .may_swap = !noswap, .order = 0, .mem_cgroup = mem, + .memcg_record = rec, }; + ktime_t start, end; sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); @@ -2272,6 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, sc.may_writepage, sc.gfp_mask); + start = ktime_get(); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. @@ -2280,19 +2301,25 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, * the priority and make it zero. 
*/ shrink_zone(0, zone, &sc); + end = ktime_get(); + + if (rec) + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); + *scanned = sc.nr_scanned; trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); - *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, gfp_t gfp_mask, - bool noswap) + bool noswap, + struct memcg_scanrecord *rec) { struct zonelist *zonelist; unsigned long nr_reclaimed; + ktime_t start, end; int nid; struct scan_control sc = { .may_writepage = !laptop_mode, @@ -2301,6 +2328,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .nr_to_reclaim = SWAP_CLUSTER_MAX, .order = 0, .mem_cgroup = mem_cont, + .memcg_record = rec, .nodemask = NULL, /* we don't care the placement */ .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), @@ -2309,6 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .gfp_mask = sc.gfp_mask, }; + start = ktime_get(); /* * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't * take care of from where we get pages. So the node where we start the @@ -2323,6 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, sc.gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); + end = ktime_get(); + if (rec) + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); diff --git a/trunk/mm/vmstat.c b/trunk/mm/vmstat.c index d52b13d28e8f..20c18b7694b2 100644 --- a/trunk/mm/vmstat.c +++ b/trunk/mm/vmstat.c @@ -659,7 +659,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, } #endif -#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA) +#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) #ifdef CONFIG_ZONE_DMA #define TEXT_FOR_DMA(xx) xx "_dma", #else @@ -788,7 +788,7 @@ const char * const vmstat_text[] = { #endif /* CONFIG_VM_EVENTS_COUNTERS */ }; -#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ +#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ #ifdef CONFIG_PROC_FS diff --git a/trunk/sound/core/pcm_lib.c b/trunk/sound/core/pcm_lib.c index 62e90b862a0d..86d0caf91b35 100644 --- a/trunk/sound/core/pcm_lib.c +++ b/trunk/sound/core/pcm_lib.c @@ -1761,10 +1761,6 @@ static int wait_for_avail(struct snd_pcm_substream *substream, snd_pcm_uframes_t avail = 0; long wait_time, tout; - init_waitqueue_entry(&wait, current); - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&runtime->tsleep, &wait); - if (runtime->no_period_wakeup) wait_time = MAX_SCHEDULE_TIMEOUT; else { @@ -1775,32 +1771,16 @@ static int wait_for_avail(struct snd_pcm_substream *substream, } wait_time = msecs_to_jiffies(wait_time * 1000); } - + init_waitqueue_entry(&wait, current); + add_wait_queue(&runtime->tsleep, &wait); for (;;) { if (signal_pending(current)) { err = -ERESTARTSYS; break; } - - /* - * We need to check if space became available already - * (and thus the wakeup happened already) first to close - * the race of space already having become available. - * This check must happen after been added to the waitqueue - * and having current state be INTERRUPTIBLE. 
- */ - if (is_playback) - avail = snd_pcm_playback_avail(runtime); - else - avail = snd_pcm_capture_avail(runtime); - if (avail >= runtime->twake) - break; snd_pcm_stream_unlock_irq(substream); - - tout = schedule_timeout(wait_time); - + tout = schedule_timeout_interruptible(wait_time); snd_pcm_stream_lock_irq(substream); - set_current_state(TASK_INTERRUPTIBLE); switch (runtime->status->state) { case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; @@ -1826,9 +1806,14 @@ static int wait_for_avail(struct snd_pcm_substream *substream, err = -EIO; break; } + if (is_playback) + avail = snd_pcm_playback_avail(runtime); + else + avail = snd_pcm_capture_avail(runtime); + if (avail >= runtime->twake) + break; } _endloop: - set_current_state(TASK_RUNNING); remove_wait_queue(&runtime->tsleep, &wait); *availp = avail; return err; diff --git a/trunk/sound/pci/hda/hda_codec.c b/trunk/sound/pci/hda/hda_codec.c index f3aefef37216..3e7850c238c3 100644 --- a/trunk/sound/pci/hda/hda_codec.c +++ b/trunk/sound/pci/hda/hda_codec.c @@ -579,13 +579,9 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux, return -1; } recursive++; - for (i = 0; i < nums; i++) { - unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i])); - if (type == AC_WID_PIN || type == AC_WID_AUD_OUT) - continue; + for (i = 0; i < nums; i++) if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0) return i; - } return -1; } EXPORT_SYMBOL_HDA(snd_hda_get_conn_index); diff --git a/trunk/sound/pci/hda/patch_cirrus.c b/trunk/sound/pci/hda/patch_cirrus.c index c45f3e69bcf0..d6c93d92b550 100644 --- a/trunk/sound/pci/hda/patch_cirrus.c +++ b/trunk/sound/pci/hda/patch_cirrus.c @@ -535,7 +535,7 @@ static int add_volume(struct hda_codec *codec, const char *name, int index, unsigned int pval, int dir, struct snd_kcontrol **kctlp) { - char tmp[44]; + char tmp[32]; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT); knew.private_value = pval; diff --git a/trunk/sound/soc/blackfin/bf5xx-ad193x.c b/trunk/sound/soc/blackfin/bf5xx-ad193x.c index 5956584ea3a4..a118a0fb9d81 100644 --- a/trunk/sound/soc/blackfin/bf5xx-ad193x.c +++ b/trunk/sound/soc/blackfin/bf5xx-ad193x.c @@ -103,7 +103,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = { .cpu_dai_name = "bfin-tdm.0", .codec_dai_name ="ad193x-hifi", .platform_name = "bfin-tdm-pcm-audio", - .codec_name = "spi0.5", + .codec_name = "ad193x.5", .ops = &bf5xx_ad193x_ops, }, { @@ -112,7 +112,7 @@ static struct snd_soc_dai_link bf5xx_ad193x_dai[] = { .cpu_dai_name = "bfin-tdm.1", .codec_dai_name ="ad193x-hifi", .platform_name = "bfin-tdm-pcm-audio", - .codec_name = "spi0.5", + .codec_name = "ad193x.5", .ops = &bf5xx_ad193x_ops, }, }; diff --git a/trunk/sound/soc/fsl/mpc5200_dma.c b/trunk/sound/soc/fsl/mpc5200_dma.c index 5c6c2457386e..fd0dc46afc34 100644 --- a/trunk/sound/soc/fsl/mpc5200_dma.c +++ b/trunk/sound/soc/fsl/mpc5200_dma.c @@ -369,7 +369,7 @@ static struct snd_soc_platform_driver mpc5200_audio_dma_platform = { .pcm_free = &psc_dma_free, }; -static int mpc5200_hpcd_probe(struct platform_device *op) +static int mpc5200_hpcd_probe(struct of_device *op) { phys_addr_t fifo; struct psc_dma *psc_dma; @@ -487,7 +487,7 @@ static int mpc5200_hpcd_probe(struct platform_device *op) return ret; } -static int mpc5200_hpcd_remove(struct platform_device *op) +static int mpc5200_hpcd_remove(struct of_device *op) { struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); @@ -519,7 +519,7 @@ MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match); static struct platform_driver 
mpc5200_hpcd_of_driver = { .probe = mpc5200_hpcd_probe, .remove = mpc5200_hpcd_remove, - .driver = { + .dev = { .owner = THIS_MODULE, .name = "mpc5200-pcm-audio", .of_match_table = mpc5200_hpcd_match, diff --git a/trunk/sound/soc/imx/imx-pcm-fiq.c b/trunk/sound/soc/imx/imx-pcm-fiq.c index 7945625e0e08..309c59e6fb6c 100644 --- a/trunk/sound/soc/imx/imx-pcm-fiq.c +++ b/trunk/sound/soc/imx/imx-pcm-fiq.c @@ -240,6 +240,7 @@ static int ssi_irq = 0; static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) { + struct snd_card *card = rtd->card->snd_card; struct snd_soc_dai *dai = rtd->cpu_dai; struct snd_pcm *pcm = rtd->pcm; int ret; diff --git a/trunk/sound/soc/kirkwood/kirkwood-i2s.c b/trunk/sound/soc/kirkwood/kirkwood-i2s.c index d0bcf3fcea01..8f16cd37c2af 100644 --- a/trunk/sound/soc/kirkwood/kirkwood-i2s.c +++ b/trunk/sound/soc/kirkwood/kirkwood-i2s.c @@ -424,7 +424,7 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev) if (!priv->mem) { dev_err(&pdev->dev, "request_mem_region failed\n"); err = -EBUSY; - goto err_alloc; + goto error_alloc; } priv->io = ioremap(priv->mem->start, SZ_16K); diff --git a/trunk/sound/soc/soc-cache.c b/trunk/sound/soc/soc-cache.c index 20b7f3b003a3..d9f8aded51f3 100644 --- a/trunk/sound/soc/soc-cache.c +++ b/trunk/sound/soc/soc-cache.c @@ -203,14 +203,14 @@ static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec) rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); for (i = 0; i < rbnode->blklen; ++i) { regtmp = rbnode->base_reg + i; + WARN_ON(codec->writable_register && + codec->writable_register(codec, regtmp)); val = snd_soc_rbtree_get_register(rbnode, i); def = snd_soc_get_cache_val(codec->reg_def_copy, i, rbnode->word_size); if (val == def) continue; - WARN_ON(!snd_soc_codec_writable_register(codec, regtmp)); - codec->cache_bypass = 1; ret = snd_soc_write(codec, regtmp, val); codec->cache_bypass = 0; @@ -563,7 +563,8 @@ static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec) lzo_blocks = codec->reg_cache; for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { - WARN_ON(!snd_soc_codec_writable_register(codec, i)); + WARN_ON(codec->writable_register && + codec->writable_register(codec, i)); ret = snd_soc_cache_read(codec, i, &val); if (ret) return ret; @@ -822,6 +823,8 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) codec_drv = codec->driver; for (i = 0; i < codec_drv->reg_cache_size; ++i) { + WARN_ON(codec->writable_register && + codec->writable_register(codec, i)); ret = snd_soc_cache_read(codec, i, &val); if (ret) return ret; @@ -829,9 +832,6 @@ static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec) if (snd_soc_get_cache_val(codec->reg_def_copy, i, codec_drv->reg_word_size) == val) continue; - - WARN_ON(!snd_soc_codec_writable_register(codec, i)); - ret = snd_soc_write(codec, i, val); if (ret) return ret; diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c index d2ef014af215..b085d8e87574 100644 --- a/trunk/sound/soc/soc-core.c +++ b/trunk/sound/soc/soc-core.c @@ -1633,7 +1633,7 @@ int snd_soc_codec_readable_register(struct snd_soc_codec *codec, if (codec->readable_register) return codec->readable_register(codec, reg); else - return 1; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); @@ -1651,7 +1651,7 @@ int snd_soc_codec_writable_register(struct snd_soc_codec *codec, if (codec->writable_register) return codec->writable_register(codec, reg); else - return 1; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); 
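
The soc-cache.c hunks just above all share the same sync loop: walk the register cache and write back only those registers whose cached value no longer matches the chip default. Below is a minimal userspace sketch of that pattern, assuming an invented hw_write() and made-up register contents; it is an illustration of the idea, not the ASoC cache API itself.

/*
 * Illustrative sketch (plain C, not kernel code): restore only the
 * registers that differ from their hardware defaults, as the ASoC
 * cache-sync loops above do.  All names here are made up.
 */
#include <stdio.h>

#define NUM_REGS 8

/* stand-in for a real hardware write; just logs the access */
static int hw_write(unsigned int reg, unsigned int val)
{
	printf("write reg 0x%02x = 0x%04x\n", reg, val);
	return 0;
}

/* write back only the registers that no longer hold their default value */
static int cache_sync(const unsigned int *cache, const unsigned int *defaults,
		      unsigned int nregs)
{
	unsigned int reg;
	int ret;

	for (reg = 0; reg < nregs; reg++) {
		if (cache[reg] == defaults[reg])
			continue;	/* still at default, nothing to restore */
		ret = hw_write(reg, cache[reg]);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	unsigned int defaults[NUM_REGS] = { 0, 0, 0x8000, 0, 0, 0x00ff, 0, 0 };
	unsigned int cache[NUM_REGS]    = { 0, 0x0001, 0x8000, 0, 0, 0x00aa, 0, 0 };

	return cache_sync(cache, defaults, NUM_REGS);
}

The early continue on values equal to the default is what keeps a resume-time sync cheap when most registers were never touched.
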
diff --git a/trunk/sound/soc/soc-dapm.c b/trunk/sound/soc/soc-dapm.c index d67c637557a7..7e15914b3633 100644 --- a/trunk/sound/soc/soc-dapm.c +++ b/trunk/sound/soc/soc-dapm.c @@ -2763,7 +2763,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); /** * snd_soc_dapm_free - free dapm resources - * @dapm: DAPM context + * @card: SoC device * * Free all dapm widgets and resources. */ diff --git a/trunk/sound/soc/soc-jack.c b/trunk/sound/soc/soc-jack.c index fa31d9c2abd8..38b00131b2fe 100644 --- a/trunk/sound/soc/soc-jack.c +++ b/trunk/sound/soc/soc-jack.c @@ -105,7 +105,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) snd_soc_dapm_sync(dapm); - snd_jack_report(jack->jack, jack->status); + snd_jack_report(jack->jack, status); out: mutex_unlock(&codec->mutex);
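
Looking back at the tcm_fc hunks near the top of this section: they replace the per-tpg workqueue with an explicit command queue drained by a dedicated kthread (ft_queue_cmd()/ft_dequeue_cmd()/ft_thread()). The sketch below re-creates that producer/consumer shape in portable userspace C with pthreads, purely for illustration; the kernel version uses wake_up_process() and schedule_timeout_interruptible() where a condition variable stands in here, and every name in the sketch is invented.

/*
 * Userspace sketch (pthreads, not kernel code) of the queue-plus-thread
 * pattern: a producer enqueues a command and wakes the worker thread,
 * which dequeues and executes commands until told to stop.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int id;
	struct cmd *next;
};

static struct cmd *queue_head;
static struct cmd **queue_tail = &queue_head;
static int stopping;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

/* producer side: roughly what ft_queue_cmd() does with the tpg queue */
static void queue_cmd(struct cmd *c)
{
	pthread_mutex_lock(&lock);
	c->next = NULL;
	*queue_tail = c;
	queue_tail = &c->next;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wake);	/* the kernel code wakes the kthread instead */
}

/* consumer thread: roughly the role of ft_thread()/ft_dequeue_cmd() */
static void *cmd_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!queue_head && !stopping)
			pthread_cond_wait(&wake, &lock);
		if (!queue_head && stopping)
			break;
		struct cmd *c = queue_head;
		queue_head = c->next;
		if (!queue_head)
			queue_tail = &queue_head;
		pthread_mutex_unlock(&lock);

		printf("executing cmd %d\n", c->id);	/* the ft_exec_req() step */
		free(c);

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, cmd_thread, NULL);

	for (int i = 0; i < 3; i++) {
		struct cmd *c = malloc(sizeof(*c));
		c->id = i;
		queue_cmd(c);
	}

	pthread_mutex_lock(&lock);
	stopping = 1;		/* analogous to kthread_stop() */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wake);
	pthread_join(tid, NULL);
	return 0;
}

In the sketch the queue is re-checked under the lock before each wait, so a wakeup that arrives before the consumer sleeps is never lost.
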
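
Also worth a closer look are the memcontrol.c and vmscan.c hunks earlier in this section, which add the memory.vmscan_stat accounting: each reclaim pass fills a memcg_scanrecord with anon/file scanned, rotated and freed counts plus elapsed time, and __mem_cgroup_record_scanstat() folds that record into a per-context stats array whose entries are exported with "_by_limit"/"_by_system" (and "_under_hierarchy") suffixes. The plain-C sketch below copies the enum, key names and accumulation logic from the diff; the sample numbers and main() are invented for illustration only.

/*
 * Sketch (plain C, not kernel code) of the vmscan_stat bookkeeping:
 * fold one scan record into a stats array and print the keys the way
 * mem_cgroup_vmscan_stat_read() builds them.
 */
#include <stdio.h>
#include <string.h>

enum { SCAN, SCAN_ANON, SCAN_FILE, ROTATE, ROTATE_ANON, ROTATE_FILE,
       FREED, FREED_ANON, FREED_FILE, ELAPSED, NR_SCANSTATS };

static const char *scanstat_string[NR_SCANSTATS] = {
	"scanned_pages", "scanned_anon_pages", "scanned_file_pages",
	"rotated_pages", "rotated_anon_pages", "rotated_file_pages",
	"freed_pages", "freed_anon_pages", "freed_file_pages",
	"elapsed_ns",
};

struct scanrecord {
	unsigned long nr_scanned[2];	/* [0] = anon, [1] = file */
	unsigned long nr_rotated[2];
	unsigned long nr_freed[2];
	unsigned long elapsed;		/* nanoseconds */
};

/* accumulate one reclaim pass into a stats array, as the diff does */
static void record_scanstat(unsigned long *stats, const struct scanrecord *rec)
{
	stats[SCAN]        += rec->nr_scanned[0] + rec->nr_scanned[1];
	stats[SCAN_ANON]   += rec->nr_scanned[0];
	stats[SCAN_FILE]   += rec->nr_scanned[1];
	stats[ROTATE]      += rec->nr_rotated[0] + rec->nr_rotated[1];
	stats[ROTATE_ANON] += rec->nr_rotated[0];
	stats[ROTATE_FILE] += rec->nr_rotated[1];
	stats[FREED]       += rec->nr_freed[0] + rec->nr_freed[1];
	stats[FREED_ANON]  += rec->nr_freed[0];
	stats[FREED_FILE]  += rec->nr_freed[1];
	stats[ELAPSED]     += rec->elapsed;
}

int main(void)
{
	unsigned long stats[NR_SCANSTATS] = { 0 };
	struct scanrecord rec = {
		.nr_scanned = { 100, 400 },
		.nr_rotated = { 10, 40 },
		.nr_freed   = { 50, 300 },
		.elapsed    = 123456,
	};
	char key[64];
	int i;

	record_scanstat(stats, &rec);

	/* print the "_by_limit" view of the counters */
	for (i = 0; i < NR_SCANSTATS; i++) {
		strcpy(key, scanstat_string[i]);
		strcat(key, "_by_limit");
		printf("%s %lu\n", key, stats[i]);
	}
	return 0;
}

Index 0 of each pair is anon and index 1 is file, matching the assignments made in update_isolated_counts() in the vmscan.c hunk.
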