From e87561ea18c1db2646da6c771f06197335d5edf1 Mon Sep 17 00:00:00 2001
From: Christoph Paasch
Date: Sun, 7 Apr 2013 04:53:15 +0000
Subject: [PATCH] --- yaml ---

r: 362370
b: refs/heads/master
c: 50a75a8914539c5dcd441c5f54d237a666a426fd
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/Makefile | 2 +-
 trunk/arch/alpha/Makefile | 2 +-
 trunk/arch/alpha/include/asm/floppy.h | 2 +-
 trunk/arch/alpha/kernel/irq.c | 7 +
 trunk/arch/alpha/kernel/irq_alpha.c | 10 +-
 trunk/arch/alpha/kernel/sys_nautilus.c | 5 -
 trunk/arch/alpha/kernel/sys_titan.c | 14 +-
 trunk/arch/arc/include/asm/irqflags.h | 12 +-
 trunk/arch/c6x/include/asm/irqflags.h | 2 +-
 trunk/arch/ia64/kernel/palinfo.c | 77 ++++-
 trunk/arch/powerpc/platforms/pseries/lpar.c | 8 +-
 trunk/arch/tile/include/asm/irqflags.h | 10 +-
 trunk/arch/x86/boot/compressed/Makefile | 5 +-
 trunk/arch/x86/include/asm/syscall.h | 4 +-
 trunk/arch/x86/kvm/lapic.c | 2 +-
 trunk/arch/x86/kvm/x86.c | 13 +-
 trunk/block/blk-sysfs.c | 2 -
 trunk/block/partition-generic.c | 1 +
 trunk/crypto/gcm.c | 17 +-
 trunk/drivers/acpi/pci_root.c | 76 ++--
 trunk/drivers/ata/ata_piix.c | 14 +-
 trunk/drivers/ata/libata-core.c | 6 +-
 trunk/drivers/ata/libata-scsi.c | 8 +-
 trunk/drivers/block/loop.c | 21 +-
 trunk/drivers/block/mtip32xx/mtip32xx.c | 327 +++++-------------
 trunk/drivers/block/mtip32xx/mtip32xx.h | 18 +-
 trunk/drivers/crypto/ux500/cryp/cryp_core.c | 2 +-
 trunk/drivers/eisa/pci_eisa.c | 67 ++--
 trunk/drivers/gpu/drm/mgag200/mgag200_mode.c | 13 +-
 .../gpu/drm/nouveau/core/subdev/bios/base.c | 17 -
 trunk/drivers/gpu/drm/nouveau/nv50_display.c | 2 +-
 trunk/drivers/gpu/drm/radeon/radeon_bios.c | 26 --
 trunk/drivers/hwspinlock/hwspinlock_core.c | 2 -
 trunk/drivers/md/dm-cache-target.c | 51 +--
 trunk/drivers/pci/pci-acpi.c | 15 +-
 trunk/drivers/pci/pci-driver.c | 5 +-
 trunk/drivers/pci/pcie/portdrv_pci.c | 13 +
 trunk/drivers/pci/rom.c | 67 ++--
 trunk/drivers/remoteproc/Kconfig | 2 +-
 trunk/drivers/remoteproc/remoteproc_core.c | 6 +-
 trunk/drivers/remoteproc/ste_modem_rproc.c | 7 +-
 trunk/drivers/vfio/pci/vfio_pci.c | 3 +-
 trunk/fs/ecryptfs/miscdev.c | 14 +-
 trunk/fs/namespace.c | 2 +-
 trunk/fs/nfs/nfs4client.c | 45 +--
 trunk/fs/nfs/nfs4proc.c | 1 -
 trunk/fs/nfs/nfs4state.c | 8 +-
 trunk/fs/proc/generic.c | 119 ++-----
 trunk/include/linux/ata.h | 2 +-
 trunk/include/linux/ftrace.h | 2 -
 trunk/include/linux/kvm_host.h | 2 +-
 trunk/include/linux/kvm_types.h | 1 -
 trunk/include/linux/libata.h | 1 -
 trunk/include/linux/pci.h | 1 -
 trunk/include/linux/preempt.h | 22 +-
 trunk/include/linux/proc_fs.h | 2 -
 trunk/include/linux/spinlock_up.h | 29 +-
 trunk/kernel/trace/ftrace.c | 13 +-
 trunk/kernel/trace/trace.c | 5 +-
 trunk/net/ipv6/tcp_ipv6.c | 1 +
 trunk/net/sunrpc/clnt.c | 11 +-
 trunk/virt/kvm/kvm_main.c | 47 +--
 63 files changed, 492 insertions(+), 801 deletions(-)

diff --git a/[refs] b/[refs]
index 7cb98bd57d88..4e63a1ee4f26 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: eb02db38ee6da074628685971042b847dee05d43
+refs/heads/master: 50a75a8914539c5dcd441c5f54d237a666a426fd
diff --git a/trunk/Makefile b/trunk/Makefile
index 6db672b15bda..58a165b02af1 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc5
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/trunk/arch/alpha/Makefile b/trunk/arch/alpha/Makefile
index 2cc3cc519c54..4759fe751aa1 100644
--- a/trunk/arch/alpha/Makefile
+++ b/trunk/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
 
 LDFLAGS_vmlinux := -static -N
#-relax CHECKFLAGS += -D__alpha__ -m64 -cflags-y := -pipe -mno-fp-regs -ffixed-8 +cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data cflags-y += $(call cc-option, -fno-jump-tables) cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 diff --git a/trunk/arch/alpha/include/asm/floppy.h b/trunk/arch/alpha/include/asm/floppy.h index bae97eb19d26..46cefbd50e73 100644 --- a/trunk/arch/alpha/include/asm/floppy.h +++ b/trunk/arch/alpha/include/asm/floppy.h @@ -26,7 +26,7 @@ #define fd_disable_irq() disable_irq(FLOPPY_IRQ) #define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - 0, "floppy", NULL) + IRQF_DISABLED, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) #ifdef CONFIG_PCI diff --git a/trunk/arch/alpha/kernel/irq.c b/trunk/arch/alpha/kernel/irq.c index 7b2be251c30f..2872accd2215 100644 --- a/trunk/arch/alpha/kernel/irq.c +++ b/trunk/arch/alpha/kernel/irq.c @@ -117,6 +117,13 @@ handle_irq(int irq) return; } + /* + * From here we must proceed with IPL_MAX. Note that we do not + * explicitly enable interrupts afterwards - some MILO PALcode + * (namely LX164 one) seems to have severe problems with RTI + * at IPL 0. + */ + local_irq_disable(); irq_enter(); generic_handle_irq_desc(irq, desc); irq_exit(); diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c index f433fc11877a..772ddfdb71a8 100644 --- a/trunk/arch/alpha/kernel/irq_alpha.c +++ b/trunk/arch/alpha/kernel/irq_alpha.c @@ -45,14 +45,6 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; - - /* - * Disable interrupts during IRQ handling. - * Note that there is no matching local_irq_enable() due to - * severe problems with RTI at IPL0 and some MILO PALcode - * (namely LX164). - */ - local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP @@ -70,6 +62,7 @@ do_entInt(unsigned long type, unsigned long vector, { long cpu; + local_irq_disable(); smp_percpu_timer_interrupt(regs); cpu = smp_processor_id(); if (cpu != boot_cpuid) { @@ -229,6 +222,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, struct irqaction timer_irqaction = { .handler = timer_interrupt, + .flags = IRQF_DISABLED, .name = "timer", }; diff --git a/trunk/arch/alpha/kernel/sys_nautilus.c b/trunk/arch/alpha/kernel/sys_nautilus.c index 1383f8601a93..4d4c046f708d 100644 --- a/trunk/arch/alpha/kernel/sys_nautilus.c +++ b/trunk/arch/alpha/kernel/sys_nautilus.c @@ -188,10 +188,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); -static struct resource irongate_io = { - .name = "Irongate PCI IO", - .flags = IORESOURCE_IO, -}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -213,7 +209,6 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; - bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c index a53cf03f49d5..5cf4a481b8c5 100644 --- a/trunk/arch/alpha/kernel/sys_titan.c +++ b/trunk/arch/alpha/kernel/sys_titan.c @@ -280,15 +280,15 @@ titan_late_init(void) * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. 
*/ - titan_request_irq(63+16, titan_intr_nop, 0, + titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, "CChip Error", NULL); - titan_request_irq(62+16, titan_intr_nop, 0, + titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, "PChip 0 H_Error", NULL); - titan_request_irq(61+16, titan_intr_nop, 0, + titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, "PChip 1 H_Error", NULL); - titan_request_irq(60+16, titan_intr_nop, 0, + titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, "PChip 0 C_Error", NULL); - titan_request_irq(59+16, titan_intr_nop, 0, + titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, "PChip 1 C_Error", NULL); /* @@ -348,9 +348,9 @@ privateer_init_pci(void) * Hook a couple of extra err interrupts that the * common titan code won't. */ - titan_request_irq(53+16, titan_intr_nop, 0, + titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, "NMI", NULL); - titan_request_irq(50+16, titan_intr_nop, 0, + titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, "Temperature Warning", NULL); /* diff --git a/trunk/arch/arc/include/asm/irqflags.h b/trunk/arch/arc/include/asm/irqflags.h index eac071668201..ccd84806b62f 100644 --- a/trunk/arch/arc/include/asm/irqflags.h +++ b/trunk/arch/arc/include/asm/irqflags.h @@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) " flag.nz %0 \n" : "=r"(temp), "=r"(flags) : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) - : "memory", "cc"); + : "cc"); return flags; } @@ -53,8 +53,7 @@ static inline void arch_local_irq_restore(unsigned long flags) __asm__ __volatile__( " flag %0 \n" : - : "r"(flags) - : "memory"); + : "r"(flags)); } /* @@ -74,8 +73,7 @@ static inline void arch_local_irq_disable(void) " and %0, %0, %1 \n" " flag %0 \n" : "=&r"(temp) - : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) - : "memory"); + : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); } /* @@ -87,9 +85,7 @@ static inline long arch_local_save_flags(void) __asm__ __volatile__( " lr %0, [status32] \n" - : "=&r"(temp) - : - : "memory"); + : "=&r"(temp)); return temp; } diff --git a/trunk/arch/c6x/include/asm/irqflags.h b/trunk/arch/c6x/include/asm/irqflags.h index 2c71d5634ec2..cf78e09e18c3 100644 --- a/trunk/arch/c6x/include/asm/irqflags.h +++ b/trunk/arch/c6x/include/asm/irqflags.h @@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) /* set interrupt enabled status */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); + asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); } /* unconditionally enable interrupts */ diff --git a/trunk/arch/ia64/kernel/palinfo.c b/trunk/arch/ia64/kernel/palinfo.c index 79521d5499f9..77597e5ea60a 100644 --- a/trunk/arch/ia64/kernel/palinfo.c +++ b/trunk/arch/ia64/kernel/palinfo.c @@ -849,6 +849,17 @@ static palinfo_entry_t palinfo_entries[]={ #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) +/* + * this array is used to keep track of the proc entries we create. This is + * required in the module mode when we need to remove all entries. 
The procfs code + * does not do recursion of deletion + * + * Notes: + * - +1 accounts for the cpuN directory entry in /proc/pal + */ +#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) + +static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; static struct proc_dir_entry *palinfo_dir; /* @@ -960,32 +971,60 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi static void __cpuinit create_palinfo_proc_entries(unsigned int cpu) { +# define CPUSTR "cpu%d" + pal_func_cpu_u_t f; + struct proc_dir_entry **pdir; struct proc_dir_entry *cpu_dir; int j; - char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ - sprintf(cpustr, "cpu%d", cpu); + char cpustr[sizeof(CPUSTR)]; + + + /* + * we keep track of created entries in a depth-first order for + * cleanup purposes. Each entry is stored into palinfo_proc_entries + */ + sprintf(cpustr,CPUSTR, cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); - if (!cpu_dir) - return; f.req_cpu = cpu; + /* + * Compute the location to store per cpu entries + * We dont store the top level entry in this list, but + * remove it finally after removing all cpu entries. + */ + pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; + *pdir++ = cpu_dir; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; - create_proc_read_entry( - palinfo_entries[j].name, 0, cpu_dir, - palinfo_read_entry, (void *)f.value); + *pdir = create_proc_read_entry( + palinfo_entries[j].name, 0, cpu_dir, + palinfo_read_entry, (void *)f.value); + pdir++; } } static void remove_palinfo_proc_entries(unsigned int hcpu) { - char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ - sprintf(cpustr, "cpu%d", hcpu); - remove_proc_subtree(cpustr, palinfo_dir); + int j; + struct proc_dir_entry *cpu_dir, **pdir; + + pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; + cpu_dir = *pdir; + *pdir++=NULL; + for (j=0; j < (NR_PALINFO_ENTRIES); j++) { + if ((*pdir)) { + remove_proc_entry ((*pdir)->name, cpu_dir); + *pdir ++= NULL; + } + } + + if (cpu_dir) { + remove_proc_entry(cpu_dir->name, palinfo_dir); + } } static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1019,8 +1058,6 @@ palinfo_init(void) printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); - if (!palinfo_dir) - return -ENOMEM; /* Create palinfo dirs in /proc for all online cpus */ for_each_online_cpu(i) { @@ -1036,8 +1073,22 @@ palinfo_init(void) static void __exit palinfo_exit(void) { + int i = 0; + + /* remove all nodes: depth first pass. Could optimize this */ + for_each_online_cpu(i) { + remove_palinfo_proc_entries(i); + } + + /* + * Remove the top level entry finally + */ + remove_proc_entry(palinfo_dir->name, NULL); + + /* + * Unregister from cpu notifier callbacks + */ unregister_hotcpu_notifier(&palinfo_cpu_notifier); - remove_proc_subtree("pal", NULL); } module_init(palinfo_init); diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c index 299731e9036b..0da39fed355a 100644 --- a/trunk/arch/powerpc/platforms/pseries/lpar.c +++ b/trunk/arch/powerpc/platforms/pseries/lpar.c @@ -186,13 +186,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - - /* - * The test for adjunct partition is performed before the - * ANDCOND test. H_RESOURCE may be returned, so we need to - * check for that as well. 
- */ - BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); + BUG_ON(lpar_rc != H_NOT_FOUND); slot_offset++; slot_offset &= 0x7; diff --git a/trunk/arch/tile/include/asm/irqflags.h b/trunk/arch/tile/include/asm/irqflags.h index c96f9bbb760d..241c0bb60b12 100644 --- a/trunk/arch/tile/include/asm/irqflags.h +++ b/trunk/arch/tile/include/asm/irqflags.h @@ -40,15 +40,7 @@ #include #include -/* - * Set and clear kernel interrupt masks. - * - * NOTE: __insn_mtspr() is a compiler builtin marked as a memory - * clobber. We rely on it being equivalent to a compiler barrier in - * this code since arch_local_irq_save() and friends must act as - * compiler barriers. This compiler semantic is baked into enough - * places that the compiler will maintain it going forward. - */ +/* Set and clear kernel interrupt masks. */ #if CHIP_HAS_SPLIT_INTR_MASK() #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 # error Fix assumptions about which word various interrupts are in diff --git a/trunk/arch/x86/boot/compressed/Makefile b/trunk/arch/x86/boot/compressed/Makefile index 5ef205c5f37b..8a84501acb1b 100644 --- a/trunk/arch/x86/boot/compressed/Makefile +++ b/trunk/arch/x86/boot/compressed/Makefile @@ -4,7 +4,7 @@ # create a compressed vmlinux image from the original vmlinux # -targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo +targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC @@ -29,6 +29,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/piggy.o $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone +$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o @@ -42,7 +43,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs +targets += vmlinux.bin.all vmlinux.relocs CMD_RELOCS = arch/x86/tools/relocs quiet_cmd_relocs = RELOCS $@ diff --git a/trunk/arch/x86/include/asm/syscall.h b/trunk/arch/x86/include/asm/syscall.h index 2e188d68397c..1ace47b62592 100644 --- a/trunk/arch/x86/include/asm/syscall.h +++ b/trunk/arch/x86/include/asm/syscall.h @@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; */ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return regs->orig_ax; + return regs->orig_ax & __SYSCALL_MASK; } static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { - regs->ax = regs->orig_ax; + regs->ax = regs->orig_ax & __SYSCALL_MASK; } static inline long syscall_get_error(struct task_struct *task, diff --git a/trunk/arch/x86/kvm/lapic.c b/trunk/arch/x86/kvm/lapic.c index f77df1c5de6e..02b51dd4e4ad 100644 --- a/trunk/arch/x86/kvm/lapic.c +++ b/trunk/arch/x86/kvm/lapic.c @@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) if (!pv_eoi_enabled(vcpu)) return 0; return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, sizeof(u8)); + addr); } void kvm_lapic_init(void) diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index e1721324c271..f19ac0aca60d 100644 --- 
a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -1823,8 +1823,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, - sizeof(u32))) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1953,9 +1952,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) gpa_offset = data & ~(PAGE_MASK | 1); + /* Check that the address is 32-byte aligned. */ + if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) + break; + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, - &vcpu->arch.pv_time, data & ~1ULL, - sizeof(struct pvclock_vcpu_time_info))) + &vcpu->arch.pv_time, data & ~1ULL)) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; @@ -1975,8 +1977,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, - data & KVM_STEAL_VALID_BITS, - sizeof(struct kvm_steal_time))) + data & KVM_STEAL_VALID_BITS)) return 1; vcpu->arch.st.msr_val = data; diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c index 5efc5a647183..6206a934eb8c 100644 --- a/trunk/block/blk-sysfs.c +++ b/trunk/block/blk-sysfs.c @@ -229,8 +229,6 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ - if (ret < 0) \ - return ret; \ if (neg) \ val = !val; \ \ diff --git a/trunk/block/partition-generic.c b/trunk/block/partition-generic.c index 789cdea05893..ae95ee6a58aa 100644 --- a/trunk/block/partition-generic.c +++ b/trunk/block/partition-generic.c @@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno) hd_struct_put(part); } +EXPORT_SYMBOL(delete_partition); static ssize_t whole_disk_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/trunk/crypto/gcm.c b/trunk/crypto/gcm.c index 13ccbda34ff9..137ad1ec5438 100644 --- a/trunk/crypto/gcm.c +++ b/trunk/crypto/gcm.c @@ -44,7 +44,6 @@ struct crypto_rfc4543_ctx { struct crypto_rfc4543_req_ctx { u8 auth_tag[16]; - u8 assocbuf[32]; struct scatterlist cipher[1]; struct scatterlist payload[2]; struct scatterlist assoc[2]; @@ -1134,19 +1133,9 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 
0 : authsize); - if (req->assoc->length == req->assoclen) { - sg_init_table(assoc, 2); - sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, - req->assoc->offset); - } else { - BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); - - scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, - req->assoclen, 0); - - sg_init_table(assoc, 2); - sg_set_buf(assoc, rctx->assocbuf, req->assoclen); - } + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c index 6ae5e440436e..5ff173066127 100644 --- a/trunk/drivers/acpi/pci_root.c +++ b/trunk/drivers/acpi/pci_root.c @@ -415,6 +415,7 @@ static int acpi_pci_root_add(struct acpi_device *device, struct acpi_pci_root *root; struct acpi_pci_driver *driver; u32 flags, base_flags; + bool is_osc_granted = false; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -475,30 +476,6 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; acpi_pci_osc_support(root, flags); - /* - * TBD: Need PCI interface for enumeration/configuration of roots. - */ - - mutex_lock(&acpi_pci_root_lock); - list_add_tail(&root->node, &acpi_pci_roots); - mutex_unlock(&acpi_pci_root_lock); - - /* - * Scan the Root Bridge - * -------------------- - * Must do this prior to any attempt to bind the root device, as the - * PCI namespace does not get created until this call is made (and - * thus the root bridge's pci_dev does not exist). - */ - root->bus = pci_acpi_scan_root(root); - if (!root->bus) { - printk(KERN_ERR PREFIX - "Bus %04x:%02x not present in PCI namespace\n", - root->segment, (unsigned int)root->secondary.start); - result = -ENODEV; - goto out_del_root; - } - /* Indicate support for various _OSC capabilities. */ if (pci_ext_cfg_avail()) flags |= OSC_EXT_PCI_CONFIG_SUPPORT; @@ -517,7 +494,6 @@ static int acpi_pci_root_add(struct acpi_device *device, flags = base_flags; } } - if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL @@ -538,28 +514,54 @@ static int acpi_pci_root_add(struct acpi_device *device, status = acpi_pci_osc_control_set(device->handle, &flags, OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); if (ACPI_SUCCESS(status)) { + is_osc_granted = true; dev_info(&device->dev, "ACPI _OSC control (0x%02x) granted\n", flags); - if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { - /* - * We have ASPM control, but the FADT indicates - * that it's unsupported. Clear it. - */ - pcie_clear_aspm(root->bus); - } } else { + is_osc_granted = false; dev_info(&device->dev, "ACPI _OSC request failed (%s), " "returned control mask: 0x%02x\n", acpi_format_exception(status), flags); - pr_info("ACPI _OSC control for PCIe not granted, " - "disabling ASPM\n"); - pcie_no_aspm(); } } else { dev_info(&device->dev, - "Unable to request _OSC control " - "(_OSC support mask: 0x%02x)\n", flags); + "Unable to request _OSC control " + "(_OSC support mask: 0x%02x)\n", flags); + } + + /* + * TBD: Need PCI interface for enumeration/configuration of roots. 
+ */ + + mutex_lock(&acpi_pci_root_lock); + list_add_tail(&root->node, &acpi_pci_roots); + mutex_unlock(&acpi_pci_root_lock); + + /* + * Scan the Root Bridge + * -------------------- + * Must do this prior to any attempt to bind the root device, as the + * PCI namespace does not get created until this call is made (and + * thus the root bridge's pci_dev does not exist). + */ + root->bus = pci_acpi_scan_root(root); + if (!root->bus) { + printk(KERN_ERR PREFIX + "Bus %04x:%02x not present in PCI namespace\n", + root->segment, (unsigned int)root->secondary.start); + result = -ENODEV; + goto out_del_root; + } + + /* ASPM setting */ + if (is_osc_granted) { + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) + pcie_clear_aspm(root->bus); + } else { + pr_info("ACPI _OSC control for PCIe not granted, " + "disabling ASPM\n"); + pcie_no_aspm(); } pci_acpi_add_bus_pm_notifier(device, root->bus); diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 2f48123d74c4..ffdd32d22602 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -150,7 +150,6 @@ enum piix_controller_ids { tolapai_sata, piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ ich8_sata_snb, - ich8_2port_sata_snb, }; struct piix_map_db { @@ -305,7 +304,7 @@ static const struct pci_device_id piix_pci_tbl[] = { /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, /* SATA Controller IDE (Lynx Point) */ - { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point-LP) */ @@ -440,7 +439,6 @@ static const struct piix_map_db *piix_map_db_table[] = { [ich8m_apple_sata] = &ich8m_apple_map_db, [tolapai_sata] = &tolapai_map_db, [ich8_sata_snb] = &ich8_map_db, - [ich8_2port_sata_snb] = &ich8_2port_map_db, }; static struct pci_bits piix_enable_bits[] = { @@ -1244,16 +1242,6 @@ static struct ata_port_info piix_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &piix_sata_ops, }, - - [ich8_2port_sata_snb] = - { - .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR - | PIIX_FLAG_PIO16, - .pio_mask = ATA_PIO4, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .port_ops = &piix_sata_ops, - }, }; #define AHCI_PCI_BAR 5 diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index 63c743baf920..497adea1f0d6 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) * from SATA Settings page of Identify Device Data Log. 
*/ if (ata_id_has_devslp(dev->id)) { - u8 *sata_setting = ap->sector_buf; + u8 sata_setting[ATA_SECT_SIZE]; int i, j; dev->flags |= ATA_DFLAG_DEVSLP; @@ -2439,9 +2439,6 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); - if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) - dev->max_sectors = ATA_MAX_SECTORS_LBA48; - if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -4103,7 +4100,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, - { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c index ff44787e5a45..318b41358187 100644 --- a/trunk/drivers/ata/libata-scsi.c +++ b/trunk/drivers/ata/libata-scsi.c @@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == RECOVERED_ERROR && - sshdr.asc == 0 && sshdr.ascq == 0x1d) + if (sshdr.sense_key == 0 && + sshdr.asc == 0 && sshdr.ascq == 0) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } @@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == RECOVERED_ERROR && - sshdr.asc == 0 && sshdr.ascq == 0x1d) + if (sshdr.sense_key == 0 && + sshdr.asc == 0 && sshdr.ascq == 0) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c index dfe758382eaf..2c127f9c3f3b 100644 --- a/trunk/drivers/block/loop.c +++ b/trunk/drivers/block/loop.c @@ -1051,12 +1051,29 @@ static int loop_clr_fd(struct loop_device *lo) lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); - if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) - ioctl_by_bdev(bdev, BLKRRPART, 0); lo->lo_flags = 0; if (!part_shift) lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; mutex_unlock(&lo->lo_ctl_mutex); + + /* + * Remove all partitions, since BLKRRPART won't remove user + * added partitions when max_part=0 + */ + if (bdev) { + struct disk_part_iter piter; + struct hd_struct *part; + + mutex_lock_nested(&bdev->bd_mutex, 1); + invalidate_partition(bdev->bd_disk, 0); + disk_part_iter_init(&piter, bdev->bd_disk, + DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) + delete_partition(bdev->bd_disk, part->partno); + disk_part_iter_exit(&piter); + mutex_unlock(&bdev->bd_mutex); + } + /* * Need not hold lo_ctl_mutex to fput backing file. * Calling fput holding lo_ctl_mutex triggers a circular diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c index 32c678028e53..92250af84e7d 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.c +++ b/trunk/drivers/block/mtip32xx/mtip32xx.c @@ -81,17 +81,12 @@ /* Device instance number, incremented each time a device is probed. */ static int instance; -struct list_head online_list; -struct list_head removing_list; -spinlock_t dev_lock; - /* * Global variable used to hold the major block device number * allocated in mtip_init(). 
*/ static int mtip_major; static struct dentry *dfs_parent; -static struct dentry *dfs_device_status; static u32 cpu_use[NR_CPUS]; @@ -248,31 +243,40 @@ static inline void release_slot(struct mtip_port *port, int tag) /* * Reset the HBA (without sleeping) * + * Just like hba_reset, except does not call sleep, so can be + * run from interrupt/tasklet context. + * * @dd Pointer to the driver data structure. * * return value * 0 The reset was successful. * -1 The HBA Reset bit did not clear. */ -static int mtip_hba_reset(struct driver_data *dd) +static int hba_reset_nosleep(struct driver_data *dd) { unsigned long timeout; + /* Chip quirk: quiesce any chip function */ + mdelay(10); + /* Set the reset bit */ writel(HOST_RESET, dd->mmio + HOST_CTL); /* Flush */ readl(dd->mmio + HOST_CTL); - /* Spin for up to 2 seconds, waiting for reset acknowledgement */ - timeout = jiffies + msecs_to_jiffies(2000); - do { - mdelay(10); - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) - return -1; + /* + * Wait 10ms then spin for up to 1 second + * waiting for reset acknowledgement + */ + timeout = jiffies + msecs_to_jiffies(1000); + mdelay(10); + while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) + && time_before(jiffies, timeout)) + mdelay(1); - } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) - && time_before(jiffies, timeout)); + if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) + return -1; if (readl(dd->mmio + HOST_CTL) & HOST_RESET) return -1; @@ -477,7 +481,7 @@ static void mtip_restart_port(struct mtip_port *port) dev_warn(&port->dd->pdev->dev, "PxCMD.CR not clear, escalating reset\n"); - if (mtip_hba_reset(port->dd)) + if (hba_reset_nosleep(port->dd)) dev_err(&port->dd->pdev->dev, "HBA reset escalation failed.\n"); @@ -523,26 +527,6 @@ static void mtip_restart_port(struct mtip_port *port) } -static int mtip_device_reset(struct driver_data *dd) -{ - int rv = 0; - - if (mtip_check_surprise_removal(dd->pdev)) - return 0; - - if (mtip_hba_reset(dd) < 0) - rv = -EFAULT; - - mdelay(1); - mtip_init_port(dd->port); - mtip_start_port(dd->port); - - /* Enable interrupts on the HBA. */ - writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, - dd->mmio + HOST_CTL); - return rv; -} - /* * Helper function for tag logging */ @@ -648,7 +632,7 @@ static void mtip_timeout_function(unsigned long int data) if (cmdto_cnt) { print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - mtip_device_reset(port->dd); + mtip_restart_port(port); wake_up_interruptible(&port->svc_wait); } clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); @@ -1299,11 +1283,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, int rv = 0, ready2go = 1; struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; unsigned long to; - struct driver_data *dd = port->dd; /* Make sure the buffer is 8 byte aligned. This is asic specific. */ if (buffer & 0x00000007) { - dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); + dev_err(&port->dd->pdev->dev, + "SG buffer is not 8 byte aligned\n"); return -EFAULT; } @@ -1316,21 +1300,23 @@ static int mtip_exec_internal_command(struct mtip_port *port, mdelay(100); } while (time_before(jiffies, to)); if (!ready2go) { - dev_warn(&dd->pdev->dev, + dev_warn(&port->dd->pdev->dev, "Internal cmd active. 
new cmd [%02X]\n", fis->command); return -EBUSY; } set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); port->ic_pause_timer = 0; - clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); - clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); + if (fis->command == ATA_CMD_SEC_ERASE_UNIT) + clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) + clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); if (atomic == GFP_KERNEL) { if (fis->command != ATA_CMD_STANDBYNOW1) { /* wait for io to complete if non atomic */ if (mtip_quiesce_io(port, 5000) < 0) { - dev_warn(&dd->pdev->dev, + dev_warn(&port->dd->pdev->dev, "Failed to quiesce IO\n"); release_slot(port, MTIP_TAG_INTERNAL); clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); @@ -1375,84 +1361,58 @@ static int mtip_exec_internal_command(struct mtip_port *port, /* Issue the command to the hardware */ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); + /* Poll if atomic, wait_for_completion otherwise */ if (atomic == GFP_KERNEL) { /* Wait for the command to complete or timeout. */ - if (wait_for_completion_interruptible_timeout( + if (wait_for_completion_timeout( &wait, - msecs_to_jiffies(timeout)) <= 0) { - if (rv == -ERESTARTSYS) { /* interrupted */ - dev_err(&dd->pdev->dev, - "Internal command [%02X] was interrupted after %lu ms\n", - fis->command, timeout); - rv = -EINTR; - goto exec_ic_exit; - } else if (rv == 0) /* timeout */ - dev_err(&dd->pdev->dev, - "Internal command did not complete [%02X] within timeout of %lu ms\n", - fis->command, timeout); - else - dev_err(&dd->pdev->dev, - "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", - fis->command, rv, timeout); - - if (mtip_check_surprise_removal(dd->pdev) || + msecs_to_jiffies(timeout)) == 0) { + dev_err(&port->dd->pdev->dev, + "Internal command did not complete [%d] " + "within timeout of %lu ms\n", + atomic, timeout); + if (mtip_check_surprise_removal(port->dd->pdev) || test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &dd->dd_flag)) { - dev_err(&dd->pdev->dev, - "Internal command [%02X] wait returned due to SR\n", - fis->command); + &port->dd->dd_flag)) { rv = -ENXIO; goto exec_ic_exit; } - mtip_device_reset(dd); /* recover from timeout issue */ rv = -EAGAIN; - goto exec_ic_exit; } } else { - u32 hba_stat, port_stat; - /* Spin for checking if command still outstanding */ timeout = jiffies + msecs_to_jiffies(timeout); while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL)) && time_before(jiffies, timeout)) { - if (mtip_check_surprise_removal(dd->pdev)) { + if (mtip_check_surprise_removal(port->dd->pdev)) { rv = -ENXIO; goto exec_ic_exit; } if ((fis->command != ATA_CMD_STANDBYNOW1) && test_bit(MTIP_DDF_REMOVE_PENDING_BIT, - &dd->dd_flag)) { + &port->dd->dd_flag)) { rv = -ENXIO; goto exec_ic_exit; } - port_stat = readl(port->mmio + PORT_IRQ_STAT); - if (!port_stat) - continue; - - if (port_stat & PORT_IRQ_ERR) { - dev_err(&dd->pdev->dev, - "Internal command [%02X] failed\n", - fis->command); - mtip_device_reset(dd); - rv = -EIO; - goto exec_ic_exit; - } else { - writel(port_stat, port->mmio + PORT_IRQ_STAT); - hba_stat = readl(dd->mmio + HOST_IRQ_STAT); - if (hba_stat) - writel(hba_stat, - dd->mmio + HOST_IRQ_STAT); + if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { + atomic_inc(&int_cmd->active); /* error */ + break; } - break; } } + if (atomic_read(&int_cmd->active) > 1) { + dev_err(&port->dd->pdev->dev, + "Internal command [%02X] failed\n", fis->command); + rv = -EIO; + } if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & 
(1 << MTIP_TAG_INTERNAL)) { rv = -ENXIO; - if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { - mtip_device_reset(dd); + if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, + &port->dd->dd_flag)) { + mtip_restart_port(port); rv = -EAGAIN; } } @@ -1764,8 +1724,7 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, * -EINVAL Invalid parameters passed in, trim not supported * -EIO Error submitting trim request to hw */ -static int mtip_send_trim(struct driver_data *dd, unsigned int lba, - unsigned int len) +static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) { int i, rv = 0; u64 tlba, tlen, sect_left; @@ -1851,6 +1810,45 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) return (bool) !!port->identify_valid; } +/* + * Reset the HBA. + * + * Resets the HBA by setting the HBA Reset bit in the Global + * HBA Control register. After setting the HBA Reset bit the + * function waits for 1 second before reading the HBA Reset + * bit to make sure it has cleared. If HBA Reset is not clear + * an error is returned. Cannot be used in non-blockable + * context. + * + * @dd Pointer to the driver data structure. + * + * return value + * 0 The reset was successful. + * -1 The HBA Reset bit did not clear. + */ +static int mtip_hba_reset(struct driver_data *dd) +{ + mtip_deinit_port(dd->port); + + /* Set the reset bit */ + writel(HOST_RESET, dd->mmio + HOST_CTL); + + /* Flush */ + readl(dd->mmio + HOST_CTL); + + /* Wait for reset to clear */ + ssleep(1); + + /* Check the bit has cleared */ + if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { + dev_err(&dd->pdev->dev, + "Reset bit did not clear.\n"); + return -1; + } + + return 0; +} + /* * Display the identify command data. * @@ -2712,100 +2710,6 @@ static ssize_t mtip_hw_show_status(struct device *dev, static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); -/* debugsfs entries */ - -static ssize_t show_device_status(struct device_driver *drv, char *buf) -{ - int size = 0; - struct driver_data *dd, *tmp; - unsigned long flags; - char id_buf[42]; - u16 status = 0; - - spin_lock_irqsave(&dev_lock, flags); - size += sprintf(&buf[size], "Devices Present:\n"); - list_for_each_entry_safe(dd, tmp, &online_list, online_list) { - if (dd->pdev) { - if (dd->port && - dd->port->identify && - dd->port->identify_valid) { - strlcpy(id_buf, - (char *) (dd->port->identify + 10), 21); - status = *(dd->port->identify + 141); - } else { - memset(id_buf, 0, 42); - status = 0; - } - - if (dd->port && - test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { - size += sprintf(&buf[size], - " device %s %s (ftl rebuild %d %%)\n", - dev_name(&dd->pdev->dev), - id_buf, - status); - } else { - size += sprintf(&buf[size], - " device %s %s\n", - dev_name(&dd->pdev->dev), - id_buf); - } - } - } - - size += sprintf(&buf[size], "Devices Being Removed:\n"); - list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { - if (dd->pdev) { - if (dd->port && - dd->port->identify && - dd->port->identify_valid) { - strlcpy(id_buf, - (char *) (dd->port->identify+10), 21); - status = *(dd->port->identify + 141); - } else { - memset(id_buf, 0, 42); - status = 0; - } - - if (dd->port && - test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { - size += sprintf(&buf[size], - " device %s %s (ftl rebuild %d %%)\n", - dev_name(&dd->pdev->dev), - id_buf, - status); - } else { - size += sprintf(&buf[size], - " device %s %s\n", - dev_name(&dd->pdev->dev), - id_buf); - } - } - } - spin_unlock_irqrestore(&dev_lock, flags); - - 
return size; -} - -static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, - size_t len, loff_t *offset) -{ - int size = *offset; - char buf[MTIP_DFS_MAX_BUF_SIZE]; - - if (!len || *offset) - return 0; - - size += show_device_status(NULL, buf); - - *offset = size <= len ? size : len; - size = copy_to_user(ubuf, buf, *offset); - if (size) - return -EFAULT; - - return *offset; -} - static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, size_t len, loff_t *offset) { @@ -2900,13 +2804,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, return *offset; } -static const struct file_operations mtip_device_status_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = mtip_hw_read_device_status, - .llseek = no_llseek, -}; - static const struct file_operations mtip_regs_fops = { .owner = THIS_MODULE, .open = simple_open, @@ -4264,7 +4161,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, const struct cpumask *node_mask; int cpu, i = 0, j = 0; int my_node = NUMA_NO_NODE; - unsigned long flags; /* Allocate memory for this devices private data. */ my_node = pcibus_to_node(pdev->bus); @@ -4322,9 +4218,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, dd->pdev = pdev; dd->numa_node = my_node; - INIT_LIST_HEAD(&dd->online_list); - INIT_LIST_HEAD(&dd->remove_list); - memset(dd->workq_name, 0, 32); snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); @@ -4412,14 +4305,6 @@ static int mtip_pci_probe(struct pci_dev *pdev, instance++; if (rv != MTIP_FTL_REBUILD_MAGIC) set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); - else - rv = 0; /* device in rebuild state, return 0 from probe */ - - /* Add to online list even if in ftl rebuild */ - spin_lock_irqsave(&dev_lock, flags); - list_add(&dd->online_list, &online_list); - spin_unlock_irqrestore(&dev_lock, flags); - goto done; block_initialize_err: @@ -4453,15 +4338,9 @@ static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); int counter = 0; - unsigned long flags; set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); - spin_lock_irqsave(&dev_lock, flags); - list_del_init(&dd->online_list); - list_add(&dd->remove_list, &removing_list); - spin_unlock_irqrestore(&dev_lock, flags); - if (mtip_check_surprise_removal(pdev)) { while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { counter++; @@ -4487,10 +4366,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) pci_disable_msi(pdev); - spin_lock_irqsave(&dev_lock, flags); - list_del_init(&dd->remove_list); - spin_unlock_irqrestore(&dev_lock, flags); - kfree(dd); pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); } @@ -4638,11 +4513,6 @@ static int __init mtip_init(void) pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); - spin_lock_init(&dev_lock); - - INIT_LIST_HEAD(&online_list); - INIT_LIST_HEAD(&removing_list); - /* Allocate a major block device number to use with this driver. 
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { @@ -4652,18 +4522,11 @@ static int __init mtip_init(void) } mtip_major = error; - dfs_parent = debugfs_create_dir("rssd", NULL); - if (IS_ERR_OR_NULL(dfs_parent)) { - pr_warn("Error creating debugfs parent\n"); - dfs_parent = NULL; - } - if (dfs_parent) { - dfs_device_status = debugfs_create_file("device_status", - S_IRUGO, dfs_parent, NULL, - &mtip_device_status_fops); - if (IS_ERR_OR_NULL(dfs_device_status)) { - pr_err("Error creating device_status node\n"); - dfs_device_status = NULL; + if (!dfs_parent) { + dfs_parent = debugfs_create_dir("rssd", NULL); + if (IS_ERR_OR_NULL(dfs_parent)) { + pr_warn("Error creating debugfs parent\n"); + dfs_parent = NULL; } } diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h index 8e8334c9dd0f..3bffff5f670c 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.h +++ b/trunk/drivers/block/mtip32xx/mtip32xx.h @@ -129,9 +129,9 @@ enum { MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ - MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | - (1 << MTIP_PF_EH_ACTIVE_BIT) | - (1 << MTIP_PF_SE_ACTIVE_BIT) | + MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ + (1 << MTIP_PF_EH_ACTIVE_BIT) | \ + (1 << MTIP_PF_SE_ACTIVE_BIT) | \ (1 << MTIP_PF_DM_ACTIVE_BIT)), MTIP_PF_SVC_THD_ACTIVE_BIT = 4, @@ -144,9 +144,9 @@ enum { MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, - MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | - (1 << MTIP_DDF_SEC_LOCK_BIT) | - (1 << MTIP_DDF_OVER_TEMP_BIT) | + MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ + (1 << MTIP_DDF_SEC_LOCK_BIT) | \ + (1 << MTIP_DDF_OVER_TEMP_BIT) | \ (1 << MTIP_DDF_WRITE_PROTECT_BIT)), MTIP_DDF_CLEANUP_BIT = 5, @@ -180,7 +180,7 @@ struct mtip_work { #define MTIP_TRIM_TIMEOUT_MS 240000 #define MTIP_MAX_TRIM_ENTRIES 8 -#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 +#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 struct mtip_trim_entry { u32 lba; /* starting lba of region */ @@ -501,10 +501,6 @@ struct driver_data { atomic_t irq_workers_active; int isr_binding; - - struct list_head online_list; /* linkage for online list */ - - struct list_head remove_list; /* linkage for removing list */ }; #endif diff --git a/trunk/drivers/crypto/ux500/cryp/cryp_core.c b/trunk/drivers/crypto/ux500/cryp/cryp_core.c index 22c9063e0120..8bc5fef07e7a 100644 --- a/trunk/drivers/crypto/ux500/cryp/cryp_core.c +++ b/trunk/drivers/crypto/ux500/cryp/cryp_core.c @@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { .shutdown = ux500_cryp_shutdown, .driver = { .owner = THIS_MODULE, - .name = "cryp1", + .name = "cryp1" .pm = &ux500_cryp_pm, } }; diff --git a/trunk/drivers/eisa/pci_eisa.c b/trunk/drivers/eisa/pci_eisa.c index 6c3fca97d346..cdae207028a7 100644 --- a/trunk/drivers/eisa/pci_eisa.c +++ b/trunk/drivers/eisa/pci_eisa.c @@ -19,10 +19,10 @@ /* There is only *one* pci_eisa device per machine, right ? 
*/ static struct eisa_root_device pci_eisa_root; -static int __init pci_eisa_init(struct pci_dev *pdev) +static int __init pci_eisa_init(struct pci_dev *pdev, + const struct pci_device_id *ent) { - int rc, i; - struct resource *res, *bus_res = NULL; + int rc; if ((rc = pci_enable_device (pdev))) { printk (KERN_ERR "pci_eisa : Could not enable device %s\n", @@ -30,30 +30,9 @@ static int __init pci_eisa_init(struct pci_dev *pdev) return rc; } - /* - * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI - * device, so the resources available on EISA are the same as those - * available on the 82375 bus. This works the same as a PCI-PCI - * bridge in subtractive-decode mode (see pci_read_bridge_bases()). - * We assume other PCI-EISA bridges are similar. - * - * eisa_root_register() can only deal with a single io port resource, - * so we use the first valid io port resource. - */ - pci_bus_for_each_resource(pdev->bus, res, i) - if (res && (res->flags & IORESOURCE_IO)) { - bus_res = res; - break; - } - - if (!bus_res) { - dev_err(&pdev->dev, "No resources available\n"); - return -1; - } - pci_eisa_root.dev = &pdev->dev; - pci_eisa_root.res = bus_res; - pci_eisa_root.bus_base_addr = bus_res->start; + pci_eisa_root.res = pdev->bus->resource[0]; + pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; pci_eisa_root.slots = EISA_MAX_SLOTS; pci_eisa_root.dma_mask = pdev->dma_mask; dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); @@ -66,26 +45,22 @@ static int __init pci_eisa_init(struct pci_dev *pdev) return 0; } -/* - * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). - * Otherwise pnp resource will get enabled early and could prevent eisa - * to be initialized. - * Also need to make sure pci_eisa_init_early() is called after - * x86/pci_subsys_init(). - * So need to use subsys_initcall_sync with it. 
- */ -static int __init pci_eisa_init_early(void) -{ - struct pci_dev *dev = NULL; - int ret; +static struct pci_device_id pci_eisa_pci_tbl[] = { + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, + { 0, } +}; - for_each_pci_dev(dev) - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { - ret = pci_eisa_init(dev); - if (ret) - return ret; - } +static struct pci_driver __refdata pci_eisa_driver = { + .name = "pci_eisa", + .id_table = pci_eisa_pci_tbl, + .probe = pci_eisa_init, +}; - return 0; +static int __init pci_eisa_init_module (void) +{ + return pci_register_driver (&pci_eisa_driver); } -subsys_initcall_sync(pci_eisa_init_early); + +device_initcall(pci_eisa_init_module); +MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index 78d8e919509f..fe22bb780e1d 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -751,6 +751,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, int i; unsigned char misc = 0; unsigned char ext_vga[6]; + unsigned char ext_vga_index24; + unsigned char dac_index90 = 0; u8 bppshift; static unsigned char dacvalue[] = { @@ -801,6 +803,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x0000b000; break; case G200_ER: + dac_index90 = 0; break; } @@ -849,8 +852,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, WREG_DAC(i, dacvalue[i]); } - if (mdev->type == G200_ER) - WREG_DAC(0x90, 0); + if (mdev->type == G200_ER) { + WREG_DAC(0x90, dac_index90); + } + if (option) pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); @@ -947,6 +952,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (mdev->type == G200_WB) ext_vga[1] |= 0x88; + ext_vga_index24 = 0x05; + /* Set pixel clocks */ misc = 0x2d; WREG8(MGA_MISC_OUT, misc); @@ -958,7 +965,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, } if (mdev->type == G200_ER) - WREG_ECRT(0x24, 0x5); + WREG_ECRT(24, ext_vga_index24); if (mdev->type == G200_EV) { WREG_ECRT(6, 0); diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index 0e2c1a4f1659..e816f06637a7 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -248,22 +248,6 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) } } -static void -nouveau_bios_shadow_platform(struct nouveau_bios *bios) -{ - struct pci_dev *pdev = nv_device(bios)->pdev; - size_t size; - - void __iomem *rom = pci_platform_rom(pdev, &size); - if (rom && size) { - bios->data = kmalloc(size, GFP_KERNEL); - if (bios->data) { - memcpy_fromio(bios->data, rom, size); - bios->size = size; - } - } -} - static int nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) { @@ -304,7 +288,6 @@ nouveau_bios_shadow(struct nouveau_bios *bios) { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, - { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, {} }; struct methods *mthd, *best; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c index 1ddc03e51bf4..7f0e6c3f37d1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c @@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) { struct 
nv50_display_flip *flip = data; if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == - flip->chan->data) + flip->chan->data); return true; usleep_range(1, 2); return false; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index fa3c56fba294..b8015913d382 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -99,29 +99,6 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } -static bool radeon_read_platform_bios(struct radeon_device *rdev) -{ - uint8_t __iomem *bios; - size_t size; - - rdev->bios = NULL; - - bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { - return false; - } - - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { - return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } - - return true; -} - #ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. @@ -643,9 +620,6 @@ bool radeon_get_bios(struct radeon_device *rdev) if (r == false) { r = radeon_read_disabled_bios(rdev); } - if (r == false) { - r = radeon_read_platform_bios(rdev); - } if (r == false || rdev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); rdev->bios = NULL; diff --git a/trunk/drivers/hwspinlock/hwspinlock_core.c b/trunk/drivers/hwspinlock/hwspinlock_core.c index 461a0d739d75..db713c0dfba4 100644 --- a/trunk/drivers/hwspinlock/hwspinlock_core.c +++ b/trunk/drivers/hwspinlock/hwspinlock_core.c @@ -416,8 +416,6 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "%s: can't power on device\n", __func__); - pm_runtime_put_noidle(dev); - module_put(dev->driver->owner); return ret; } diff --git a/trunk/drivers/md/dm-cache-target.c b/trunk/drivers/md/dm-cache-target.c index 10744091e6ca..66120bd46d15 100644 --- a/trunk/drivers/md/dm-cache-target.c +++ b/trunk/drivers/md/dm-cache-target.c @@ -6,7 +6,6 @@ #include "dm.h" #include "dm-bio-prison.h" -#include "dm-bio-record.h" #include "dm-cache-metadata.h" #include @@ -202,15 +201,10 @@ struct per_bio_data { unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; - /* - * writethrough fields. These MUST remain at the end of this - * structure and the 'cache' member must be the first as it - * is used to determine the offsetof the writethrough fields. - */ + /* writethrough fields */ struct cache *cache; dm_cblock_t cblock; bio_end_io_t *saved_bi_end_io; - struct dm_bio_details bio_details; }; struct dm_cache_migration { @@ -519,28 +513,16 @@ static void save_stats(struct cache *cache) /*---------------------------------------------------------------- * Per bio data *--------------------------------------------------------------*/ - -/* - * If using writeback, leave out struct per_bio_data's writethrough fields. - */ -#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) -#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) - -static size_t get_per_bio_data_size(struct cache *cache) -{ - return cache->features.write_through ? 
PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; -} - -static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) +static struct per_bio_data *get_per_bio_data(struct bio *bio) { - struct per_bio_data *pb = dm_per_bio_data(bio, data_size); + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); BUG_ON(!pb); return pb; } -static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) +static struct per_bio_data *init_per_bio_data(struct bio *bio) { - struct per_bio_data *pb = get_per_bio_data(bio, data_size); + struct per_bio_data *pb = get_per_bio_data(bio); pb->tick = false; pb->req_nr = dm_bio_get_target_bio_nr(bio); @@ -574,8 +556,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) { unsigned long flags; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb = get_per_bio_data(bio); spin_lock_irqsave(&cache->lock, flags); if (cache->need_tick_bio && @@ -654,7 +635,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) static void writethrough_endio(struct bio *bio, int err) { - struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); + struct per_bio_data *pb = get_per_bio_data(bio); bio->bi_end_io = pb->saved_bi_end_io; if (err) { @@ -662,7 +643,6 @@ static void writethrough_endio(struct bio *bio, int err) return; } - dm_bio_restore(&pb->bio_details, bio); remap_to_cache(pb->cache, bio, pb->cblock); /* @@ -682,12 +662,11 @@ static void writethrough_endio(struct bio *bio, int err) static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, dm_oblock_t oblock, dm_cblock_t cblock) { - struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); + struct per_bio_data *pb = get_per_bio_data(bio); pb->cache = cache; pb->cblock = cblock; pb->saved_bi_end_io = bio->bi_end_io; - dm_bio_record(&pb->bio_details, bio); bio->bi_end_io = writethrough_endio; remap_to_origin_clear_discard(pb->cache, bio, oblock); @@ -1056,8 +1035,7 @@ static void defer_bio(struct cache *cache, struct bio *bio) static void process_flush_bio(struct cache *cache, struct bio *bio) { - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb = get_per_bio_data(bio); BUG_ON(bio->bi_size); if (!pb->req_nr) @@ -1129,8 +1107,7 @@ static void process_bio(struct cache *cache, struct prealloc *structs, dm_oblock_t block = get_bio_block(cache, bio); struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; struct policy_result lookup_result; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb = get_per_bio_data(bio); bool discarded_block = is_discarded_oblock(cache, block); bool can_migrate = discarded_block || spare_migration_bandwidth(cache); @@ -1904,6 +1881,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) cache->ti = ca->ti; ti->private = cache; + ti->per_bio_data_size = sizeof(struct per_bio_data); ti->num_flush_bios = 2; ti->flush_supported = true; @@ -1912,7 +1890,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discard_zeroes_data_unsupported = true; memcpy(&cache->features, &ca->features, sizeof(cache->features)); - ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = 
cache_is_congested; dm_table_add_target_callbacks(ti->table, &cache->callbacks); @@ -2115,7 +2092,6 @@ static int cache_map(struct dm_target *ti, struct bio *bio) int r; dm_oblock_t block = get_bio_block(cache, bio); - size_t pb_data_size = get_per_bio_data_size(cache); bool can_migrate = false; bool discarded_block; struct dm_bio_prison_cell *cell; @@ -2132,7 +2108,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio, pb_data_size); + pb = init_per_bio_data(bio); if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); @@ -2217,8 +2193,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) { struct cache *cache = ti->private; unsigned long flags; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + struct per_bio_data *pb = get_per_bio_data(bio); if (pb->tick) { policy_tick(cache->policy); diff --git a/trunk/drivers/pci/pci-acpi.c b/trunk/drivers/pci/pci-acpi.c index 5147c210df52..dee5dddaa292 100644 --- a/trunk/drivers/pci/pci-acpi.c +++ b/trunk/drivers/pci/pci-acpi.c @@ -53,15 +53,14 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) return; } - /* Clear PME Status if set. */ - if (pci_dev->pme_support) - pci_check_pme_status(pci_dev); + if (!pci_dev->pm_cap || !pci_dev->pme_support + || pci_check_pme_status(pci_dev)) { + if (pci_dev->pme_poll) + pci_dev->pme_poll = false; - if (pci_dev->pme_poll) - pci_dev->pme_poll = false; - - pci_wakeup_event(pci_dev); - pm_runtime_resume(&pci_dev->dev); + pci_wakeup_event(pci_dev); + pm_runtime_resume(&pci_dev->dev); + } if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 79277fb36c6b..1fa1e482a999 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -390,10 +390,9 @@ static void pci_device_shutdown(struct device *dev) /* * Turn off Bus Master bit on the device to tell it to not - * continue to do DMA. Don't touch devices in D3cold or unknown states. + * continue to do DMA */ - if (pci_dev->current_state <= PCI_D3hot) - pci_clear_master(pci_dev); + pci_clear_master(pci_dev); } #ifdef CONFIG_PM diff --git a/trunk/drivers/pci/pcie/portdrv_pci.c b/trunk/drivers/pci/pcie/portdrv_pci.c index ed4d09498337..08c243ab034e 100644 --- a/trunk/drivers/pci/pcie/portdrv_pci.c +++ b/trunk/drivers/pci/pcie/portdrv_pci.c @@ -184,6 +184,14 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { #define PCIE_PORTDRV_PM_OPS NULL #endif /* !PM */ +/* + * PCIe port runtime suspend is broken for some chipsets, so use a + * black list to disable runtime PM for these chipsets. + */ +static const struct pci_device_id port_runtime_pm_black_list[] = { + { /* end: all zeroes */ } +}; + /* * pcie_portdrv_probe - Probe PCI-Express port devices * @dev: PCI-Express port device being probed @@ -217,11 +225,16 @@ static int pcie_portdrv_probe(struct pci_dev *dev, * it by default. 
*/ dev->d3cold_allowed = false; + if (!pci_match_id(port_runtime_pm_black_list, dev)) + pm_runtime_put_noidle(&dev->dev); + return 0; } static void pcie_portdrv_remove(struct pci_dev *dev) { + if (!pci_match_id(port_runtime_pm_black_list, dev)) + pm_runtime_get_noresume(&dev->dev); pcie_port_device_remove(dev); pci_disable_device(dev); } diff --git a/trunk/drivers/pci/rom.c b/trunk/drivers/pci/rom.c index c5d0a08a8747..b41ac7756a4b 100644 --- a/trunk/drivers/pci/rom.c +++ b/trunk/drivers/pci/rom.c @@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) return min((size_t)(image - rom), size); } +static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) +{ + struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; + loff_t start; + + /* assign the ROM an address if it doesn't have one */ + if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) + return 0; + start = pci_resource_start(pdev, PCI_ROM_RESOURCE); + *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + + if (*size == 0) + return 0; + + /* Enable ROM space decodes */ + if (pci_enable_rom(pdev)) + return 0; + + return start; +} + /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct @@ -114,7 +135,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - loff_t start; + loff_t start = 0; void __iomem *rom; /* @@ -133,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) return (void __iomem *)(unsigned long) pci_resource_start(pdev, PCI_ROM_RESOURCE); } else { - /* assign the ROM an address if it doesn't have one */ - if (res->parent == NULL && - pci_assign_resource(pdev,PCI_ROM_RESOURCE)) - return NULL; - start = pci_resource_start(pdev, PCI_ROM_RESOURCE); - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - if (*size == 0) - return NULL; - - /* Enable ROM space decodes */ - if (pci_enable_rom(pdev)) - return NULL; + start = pci_find_rom(pdev, size); } } + /* + * Some devices may provide ROMs via a source other than the BAR + */ + if (!start && pdev->rom && pdev->romlen) { + *size = pdev->romlen; + return phys_to_virt(pdev->rom); + } + + if (!start) + return NULL; + rom = ioremap(start, *size); if (!rom) { /* restore enable if ioremap fails */ @@ -181,7 +202,8 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) return; - iounmap(rom); + if (!pdev->rom || !pdev->romlen) + iounmap(rom); /* Disable again before continuing, leave enabled if pci=rom */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) @@ -205,24 +227,7 @@ void pci_cleanup_rom(struct pci_dev *pdev) } } -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} - EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); EXPORT_SYMBOL_GPL(pci_enable_rom); EXPORT_SYMBOL_GPL(pci_disable_rom); -EXPORT_SYMBOL(pci_platform_rom); diff --git a/trunk/drivers/remoteproc/Kconfig b/trunk/drivers/remoteproc/Kconfig index c6d77e20622c..cc1f7bf53fd0 100644 --- a/trunk/drivers/remoteproc/Kconfig 
+++ b/trunk/drivers/remoteproc/Kconfig @@ -4,7 +4,7 @@ menu "Remoteproc drivers" config REMOTEPROC tristate depends on HAS_DMA - select FW_LOADER + select FW_CONFIG select VIRTIO config OMAP_REMOTEPROC diff --git a/trunk/drivers/remoteproc/remoteproc_core.c b/trunk/drivers/remoteproc/remoteproc_core.c index 8edb4aed5d36..29387df4bfc9 100644 --- a/trunk/drivers/remoteproc/remoteproc_core.c +++ b/trunk/drivers/remoteproc/remoteproc_core.c @@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) * TODO: support predefined notifyids (via resource table) */ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); - if (ret < 0) { + if (ret) { dev_err(dev, "idr_alloc failed: %d\n", ret); dma_free_coherent(dev->parent, size, va, dma); return ret; @@ -366,12 +366,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* it is now safe to add the virtio device */ ret = rproc_add_virtio_dev(rvdev, rsc->id); if (ret) - goto remove_rvdev; + goto free_rvdev; return 0; -remove_rvdev: - list_del(&rvdev->node); free_rvdev: kfree(rvdev); return ret; diff --git a/trunk/drivers/remoteproc/ste_modem_rproc.c b/trunk/drivers/remoteproc/ste_modem_rproc.c index fb95c4220052..a7743c069339 100644 --- a/trunk/drivers/remoteproc/ste_modem_rproc.c +++ b/trunk/drivers/remoteproc/ste_modem_rproc.c @@ -240,8 +240,6 @@ static int sproc_drv_remove(struct platform_device *pdev) /* Unregister as remoteproc device */ rproc_del(sproc->rproc); - dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); rproc_put(sproc->rproc); mdev->drv_data = NULL; @@ -299,13 +297,10 @@ static int sproc_probe(struct platform_device *pdev) /* Register as a remoteproc device */ err = rproc_add(rproc); if (err) - goto free_mem; + goto free_rproc; return 0; -free_mem: - dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); free_rproc: /* Reset device data upon error */ mdev->drv_data = NULL; diff --git a/trunk/drivers/vfio/pci/vfio_pci.c b/trunk/drivers/vfio/pci/vfio_pci.c index 7abc5c81af2c..8189cb6a86af 100644 --- a/trunk/drivers/vfio/pci/vfio_pci.c +++ b/trunk/drivers/vfio/pci/vfio_pci.c @@ -346,7 +346,6 @@ static long vfio_pci_ioctl(void *device_data, if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { size_t size; - int max = vfio_pci_get_irq_count(vdev, hdr.index); if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) size = sizeof(uint8_t); @@ -356,7 +355,7 @@ static long vfio_pci_ioctl(void *device_data, return -EINVAL; if (hdr.argsz - minsz < hdr.count * size || - hdr.start >= max || hdr.start + hdr.count > max) + hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git a/trunk/fs/ecryptfs/miscdev.c b/trunk/fs/ecryptfs/miscdev.c index e4141f257495..412e6eda25f8 100644 --- a/trunk/fs/ecryptfs/miscdev.c +++ b/trunk/fs/ecryptfs/miscdev.c @@ -80,6 +80,13 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) int rc; mutex_lock(&ecryptfs_daemon_hash_mux); + rc = try_module_get(THIS_MODULE); + if (rc == 0) { + rc = -EIO; + printk(KERN_ERR "%s: Error attempting to increment module use " + "count; rc = [%d]\n", __func__, rc); + goto out_unlock_daemon_list; + } rc = ecryptfs_find_daemon_by_euid(&daemon); if (!rc) { rc = -EINVAL; @@ -89,7 +96,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) if (rc) { printk(KERN_ERR "%s: Error attempting to spawn daemon; " "rc = [%d]\n", __func__, rc); - goto out_unlock_daemon_list; + goto 
out_module_put_unlock_daemon_list; } mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { @@ -101,6 +108,9 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) atomic_inc(&ecryptfs_num_miscdev_opens); out_unlock_daemon: mutex_unlock(&daemon->mux); +out_module_put_unlock_daemon_list: + if (rc) + module_put(THIS_MODULE); out_unlock_daemon_list: mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; @@ -137,6 +147,7 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) "bug.\n", __func__, rc); BUG(); } + module_put(THIS_MODULE); return rc; } @@ -460,7 +471,6 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, static const struct file_operations ecryptfs_miscdev_fops = { - .owner = THIS_MODULE, .open = ecryptfs_miscdev_open, .poll = ecryptfs_miscdev_poll, .read = ecryptfs_miscdev_read, diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c index 341d3f564082..d581e45c0a9f 100644 --- a/trunk/fs/namespace.c +++ b/trunk/fs/namespace.c @@ -1690,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name, if (IS_ERR(mnt)) { err = PTR_ERR(mnt); - goto out2; + goto out; } err = graft_tree(mnt, path); diff --git a/trunk/fs/nfs/nfs4client.c b/trunk/fs/nfs/nfs4client.c index 66b6664dcd4c..ac4fc9a8fdbc 100644 --- a/trunk/fs/nfs/nfs4client.c +++ b/trunk/fs/nfs/nfs4client.c @@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *prev = NULL; + struct nfs_client *pos, *n, *prev = NULL; struct nfs4_setclientid_res clid = { .clientid = new->cl_clientid, .confirm = new->cl_confirm, @@ -308,23 +308,10 @@ int nfs40_walk_client_list(struct nfs_client *new, int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos" */ - if (pos->cl_cons_state > NFS_CS_READY) { - atomic_inc(&pos->cl_count); - spin_unlock(&nn->nfs_client_lock); - - if (prev) - nfs_put_client(prev); - prev = pos; - - status = nfs_wait_client_init_complete(pos); - spin_lock(&nn->nfs_client_lock); - if (status < 0) - continue; - } - if (pos->cl_cons_state != NFS_CS_READY) + if (pos->cl_cons_state < NFS_CS_READY) continue; if (pos->rpc_ops != new->rpc_ops) @@ -436,16 +423,16 @@ int nfs41_walk_client_list(struct nfs_client *new, struct rpc_cred *cred) { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); - struct nfs_client *pos, *prev = NULL; + struct nfs_client *pos, *n, *prev = NULL; int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); - list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { + list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { /* If "pos" isn't marked ready, we can't trust the * remaining fields in "pos", especially the client * ID and serverowner fields. Wait for CREATE_SESSION * to finish. 
*/ - if (pos->cl_cons_state > NFS_CS_READY) { + if (pos->cl_cons_state < NFS_CS_READY) { atomic_inc(&pos->cl_count); spin_unlock(&nn->nfs_client_lock); @@ -453,17 +440,18 @@ int nfs41_walk_client_list(struct nfs_client *new, nfs_put_client(prev); prev = pos; + nfs4_schedule_lease_recovery(pos); status = nfs_wait_client_init_complete(pos); - if (status == 0) { - nfs4_schedule_lease_recovery(pos); - status = nfs4_wait_clnt_recover(pos); + if (status < 0) { + nfs_put_client(pos); + spin_lock(&nn->nfs_client_lock); + continue; } + status = pos->cl_cons_state; spin_lock(&nn->nfs_client_lock); if (status < 0) continue; } - if (pos->cl_cons_state != NFS_CS_READY) - continue; if (pos->rpc_ops != new->rpc_ops) continue; @@ -481,18 +469,17 @@ int nfs41_walk_client_list(struct nfs_client *new, continue; atomic_inc(&pos->cl_count); - *result = pos; - status = 0; + spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", __func__, pos, atomic_read(&pos->cl_count)); - break; + + *result = pos; + return 0; } /* No matching nfs_client found. */ spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s status = %d\n", __func__, status); - if (prev) - nfs_put_client(prev); return status; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index 0ad025eb523b..26431cf62ddb 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -1046,7 +1046,6 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) /* Save the delegation */ nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); - nfs_release_seqid(opendata->o_arg.seqid); ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) goto out; diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c index d41a3518509f..6ace365c6334 100644 --- a/trunk/fs/nfs/nfs4state.c +++ b/trunk/fs/nfs/nfs4state.c @@ -1886,13 +1886,7 @@ int nfs4_discover_server_trunking(struct nfs_client *clp, status = PTR_ERR(clnt); break; } - /* Note: this is safe because we haven't yet marked the - * client as ready, so we are the only user of - * clp->cl_rpcclient - */ - clnt = xchg(&clp->cl_rpcclient, clnt); - rpc_shutdown_client(clnt); - clnt = clp->cl_rpcclient; + clp->cl_rpcclient = clnt; goto again; case -NFS4ERR_MINOR_VERS_MISMATCH: diff --git a/trunk/fs/proc/generic.c b/trunk/fs/proc/generic.c index 21e1a8f1659d..4b3b3ffb52f1 100644 --- a/trunk/fs/proc/generic.c +++ b/trunk/fs/proc/generic.c @@ -755,8 +755,37 @@ void pde_put(struct proc_dir_entry *pde) free_proc_entry(pde); } -static void entry_rundown(struct proc_dir_entry *de) +/* + * Remove a /proc entry and free it if it's not currently in use. + */ +void remove_proc_entry(const char *name, struct proc_dir_entry *parent) { + struct proc_dir_entry **p; + struct proc_dir_entry *de = NULL; + const char *fn = name; + unsigned int len; + + spin_lock(&proc_subdir_lock); + if (__xlate_proc_name(name, &parent, &fn) != 0) { + spin_unlock(&proc_subdir_lock); + return; + } + len = strlen(fn); + + for (p = &parent->subdir; *p; p=&(*p)->next ) { + if (proc_match(len, fn, *p)) { + de = *p; + *p = de->next; + de->next = NULL; + break; + } + } + spin_unlock(&proc_subdir_lock); + if (!de) { + WARN(1, "name '%s'\n", name); + return; + } + spin_lock(&de->pde_unload_lock); /* * Stop accepting new callers into module. 
If you're @@ -788,40 +817,6 @@ static void entry_rundown(struct proc_dir_entry *de) spin_lock(&de->pde_unload_lock); } spin_unlock(&de->pde_unload_lock); -} - -/* - * Remove a /proc entry and free it if it's not currently in use. - */ -void remove_proc_entry(const char *name, struct proc_dir_entry *parent) -{ - struct proc_dir_entry **p; - struct proc_dir_entry *de = NULL; - const char *fn = name; - unsigned int len; - - spin_lock(&proc_subdir_lock); - if (__xlate_proc_name(name, &parent, &fn) != 0) { - spin_unlock(&proc_subdir_lock); - return; - } - len = strlen(fn); - - for (p = &parent->subdir; *p; p=&(*p)->next ) { - if (proc_match(len, fn, *p)) { - de = *p; - *p = de->next; - de->next = NULL; - break; - } - } - spin_unlock(&proc_subdir_lock); - if (!de) { - WARN(1, "name '%s'\n", name); - return; - } - - entry_rundown(de); if (S_ISDIR(de->mode)) parent->nlink--; @@ -832,57 +827,3 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) pde_put(de); } EXPORT_SYMBOL(remove_proc_entry); - -int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) -{ - struct proc_dir_entry **p; - struct proc_dir_entry *root = NULL, *de, *next; - const char *fn = name; - unsigned int len; - - spin_lock(&proc_subdir_lock); - if (__xlate_proc_name(name, &parent, &fn) != 0) { - spin_unlock(&proc_subdir_lock); - return -ENOENT; - } - len = strlen(fn); - - for (p = &parent->subdir; *p; p=&(*p)->next ) { - if (proc_match(len, fn, *p)) { - root = *p; - *p = root->next; - root->next = NULL; - break; - } - } - if (!root) { - spin_unlock(&proc_subdir_lock); - return -ENOENT; - } - de = root; - while (1) { - next = de->subdir; - if (next) { - de->subdir = next->next; - next->next = NULL; - de = next; - continue; - } - spin_unlock(&proc_subdir_lock); - - entry_rundown(de); - next = de->parent; - if (S_ISDIR(de->mode)) - next->nlink--; - de->nlink = 0; - if (de == root) - break; - pde_put(de); - - spin_lock(&proc_subdir_lock); - de = next; - } - pde_put(root); - return 0; -} -EXPORT_SYMBOL(remove_proc_subtree); diff --git a/trunk/include/linux/ata.h b/trunk/include/linux/ata.h index ee0bd9524055..8f7a3d68371a 100644 --- a/trunk/include/linux/ata.h +++ b/trunk/include/linux/ata.h @@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) } } -static inline int atapi_command_packet_set(const u16 *dev_id) +static inline bool atapi_command_packet_set(const u16 *dev_id) { return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 167abf907802..e5ca8ef50e9b 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -89,7 +89,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * that the call back has its own recursion protection. If it does * not set this, then the ftrace infrastructure will add recursion * protection for the caller. - * STUB - The ftrace_ops is just a place holder. 
*/ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -99,7 +98,6 @@ enum { FTRACE_OPS_FL_SAVE_REGS = 1 << 4, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, - FTRACE_OPS_FL_STUB = 1 << 7, }; struct ftrace_ops { diff --git a/trunk/include/linux/kvm_host.h b/trunk/include/linux/kvm_host.h index c13958251927..cad77fe09d77 100644 --- a/trunk/include/linux/kvm_host.h +++ b/trunk/include/linux/kvm_host.h @@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len); + gpa_t gpa); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/trunk/include/linux/kvm_types.h b/trunk/include/linux/kvm_types.h index b0bcce0ddc95..fa7cc7244cbd 100644 --- a/trunk/include/linux/kvm_types.h +++ b/trunk/include/linux/kvm_types.h @@ -71,7 +71,6 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; - unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h index eae7a053dc51..91c9d109e5f1 100644 --- a/trunk/include/linux/libata.h +++ b/trunk/include/linux/libata.h @@ -398,7 +398,6 @@ enum { ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ - ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/trunk/include/linux/pci.h b/trunk/include/linux/pci.h index 710067f3618c..2461033a7987 100644 --- a/trunk/include/linux/pci.h +++ b/trunk/include/linux/pci.h @@ -916,7 +916,6 @@ void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); diff --git a/trunk/include/linux/preempt.h b/trunk/include/linux/preempt.h index 87a03c746f17..5a710b9c578e 100644 --- a/trunk/include/linux/preempt.h +++ b/trunk/include/linux/preempt.h @@ -93,20 +93,14 @@ do { \ #else /* !CONFIG_PREEMPT_COUNT */ -/* - * Even if we don't have any preemption, we need preempt disable/enable - * to be barriers, so that we don't have things like get_user/put_user - * that can cause faults and scheduling migrate into our preempt-protected - * region. 
- */ -#define preempt_disable() barrier() -#define sched_preempt_enable_no_resched() barrier() -#define preempt_enable_no_resched() barrier() -#define preempt_enable() barrier() - -#define preempt_disable_notrace() barrier() -#define preempt_enable_no_resched_notrace() barrier() -#define preempt_enable_notrace() barrier() +#define preempt_disable() do { } while (0) +#define sched_preempt_enable_no_resched() do { } while (0) +#define preempt_enable_no_resched() do { } while (0) +#define preempt_enable() do { } while (0) + +#define preempt_disable_notrace() do { } while (0) +#define preempt_enable_no_resched_notrace() do { } while (0) +#define preempt_enable_notrace() do { } while (0) #endif /* CONFIG_PREEMPT_COUNT */ diff --git a/trunk/include/linux/proc_fs.h b/trunk/include/linux/proc_fs.h index 94dfb2aa5533..8307f2f94d86 100644 --- a/trunk/include/linux/proc_fs.h +++ b/trunk/include/linux/proc_fs.h @@ -117,7 +117,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, const struct file_operations *proc_fops, void *data); extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); -extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent); struct pid_namespace; @@ -203,7 +202,6 @@ static inline struct proc_dir_entry *proc_create_data(const char *name, return NULL; } #define remove_proc_entry(name, parent) do {} while (0) -#define remove_proc_subtree(name, parent) do {} while (0) static inline struct proc_dir_entry *proc_symlink(const char *name, struct proc_dir_entry *parent,const char *dest) {return NULL;} diff --git a/trunk/include/linux/spinlock_up.h b/trunk/include/linux/spinlock_up.h index e2369c167dbd..a26e2fb604e6 100644 --- a/trunk/include/linux/spinlock_up.h +++ b/trunk/include/linux/spinlock_up.h @@ -16,10 +16,7 @@ * In the debug case, 1 means unlocked, 0 means locked. (the values * are inverted, to catch initialization bugs) * - * No atomicity anywhere, we are on UP. However, we still need - * the compiler barriers, because we do not want the compiler to - * move potentially faulting instructions (notably user accesses) - * into the locked sequence, resulting in non-atomic execution. + * No atomicity anywhere, we are on UP. */ #ifdef CONFIG_DEBUG_SPINLOCK @@ -28,7 +25,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; - barrier(); } static inline void @@ -36,7 +32,6 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; - barrier(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -44,34 +39,32 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) char oldval = lock->slock; lock->slock = 0; - barrier(); return oldval > 0; } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - barrier(); lock->slock = 1; } /* * Read-write spinlocks. No debug version. 
*/ -#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) -#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) -#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) -#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) -# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) -# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) -# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ #define arch_spin_is_contended(lock) (((void)(lock), 0)) diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index 7e897106b7e0..6893d5a2bf08 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -66,7 +66,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = { .func = ftrace_stub, - .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; /* ftrace_enabled is a method to turn ftrace on or off */ @@ -4131,8 +4131,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, preempt_disable_notrace(); trace_recursion_set(TRACE_CONTROL_BIT); do_for_each_ftrace_op(op, ftrace_control_list) { - if (!(op->flags & FTRACE_OPS_FL_STUB) && - !ftrace_function_local_disabled(op) && + if (!ftrace_function_local_disabled(op) && ftrace_ops_test(op, ip)) op->func(ip, parent_ip, op, regs); } while_for_each_ftrace_op(op); @@ -4556,8 +4555,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_ops_list != &ftrace_list_end) - update_ftrace_function(); + if (ftrace_ops_list != &ftrace_list_end) { + if (ftrace_ops_list->next == &ftrace_list_end) + ftrace_trace_function = ftrace_ops_list->func; + else + ftrace_trace_function = ftrace_ops_list_func; + } } else { /* stopping ftrace calls (just send to ftrace_stub) */ diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index 7ba7fc76f9eb..4f1dade56981 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -744,11 +744,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - if (!current_trace->allocated_snapshot) { - /* Only the nop tracer should hit this when disabling */ - WARN_ON_ONCE(current_trace != &nop_trace); + if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) return; - } arch_spin_lock(&ftrace_max_lock); diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index 
f6d629fd6aee..46a5be85be87 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (dst) dst->ops->redirect(dst, sk, skb); + goto out; } if (type == ICMPV6_PKT_TOOBIG) { diff --git a/trunk/net/sunrpc/clnt.c b/trunk/net/sunrpc/clnt.c index d5f35f15af98..dcc446e7fbf6 100644 --- a/trunk/net/sunrpc/clnt.c +++ b/trunk/net/sunrpc/clnt.c @@ -304,8 +304,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru err = rpciod_up(); if (err) goto out_no_rpciod; - err = -EINVAL; + if (!xprt) + goto out_no_xprt; + if (args->version >= program->nrvers) goto out_err; version = program->version[args->version]; @@ -380,9 +382,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru out_no_stats: kfree(clnt); out_err: + xprt_put(xprt); +out_no_xprt: rpciod_down(); out_no_rpciod: - xprt_put(xprt); return ERR_PTR(err); } @@ -509,7 +512,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, new = rpc_new_client(args, xprt); if (IS_ERR(new)) { err = PTR_ERR(new); - goto out_err; + goto out_put; } atomic_inc(&clnt->cl_count); @@ -522,6 +525,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, new->cl_chatty = clnt->cl_chatty; return new; +out_put: + xprt_put(xprt); out_err: dprintk("RPC: %s: returned error %d\n", __func__, err); return ERR_PTR(err); diff --git a/trunk/virt/kvm/kvm_main.c b/trunk/virt/kvm/kvm_main.c index f18013f09e68..adc68feb5c5a 100644 --- a/trunk/virt/kvm/kvm_main.c +++ b/trunk/virt/kvm/kvm_main.c @@ -1541,38 +1541,21 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa, unsigned long len) + gpa_t gpa) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); - gfn_t start_gfn = gpa >> PAGE_SHIFT; - gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; - gfn_t nr_pages_needed = end_gfn - start_gfn + 1; - gfn_t nr_pages_avail; + gfn_t gfn = gpa >> PAGE_SHIFT; ghc->gpa = gpa; ghc->generation = slots->generation; - ghc->len = len; - ghc->memslot = gfn_to_memslot(kvm, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); - if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { + ghc->memslot = gfn_to_memslot(kvm, gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); + if (!kvm_is_error_hva(ghc->hva)) ghc->hva += offset; - } else { - /* - * If the requested region crosses two memslots, we still - * verify that the entire region is valid here. - */ - while (start_gfn <= end_gfn) { - ghc->memslot = gfn_to_memslot(kvm, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, - &nr_pages_avail); - if (kvm_is_error_hva(ghc->hva)) - return -EFAULT; - start_gfn += nr_pages_avail; - } - /* Use the slow path for cross page reads and writes. 
*/
- ghc->memslot = NULL;
- }
+ else
+ return -EFAULT;
+ return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -1583,13 +1566,8 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 struct kvm_memslots *slots = kvm_memslots(kvm);
 int r;
- BUG_ON(len > ghc->len);
- if (slots->generation != ghc->generation)
- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
-
- if (unlikely(!ghc->memslot))
- return kvm_write_guest(kvm, ghc->gpa, data, len);
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
 if (kvm_is_error_hva(ghc->hva))
 return -EFAULT;
@@ -1609,13 +1587,8 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 struct kvm_memslots *slots = kvm_memslots(kvm);
 int r;
- BUG_ON(len > ghc->len);
- if (slots->generation != ghc->generation)
- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
-
- if (unlikely(!ghc->memslot))
- return kvm_read_guest(kvm, ghc->gpa, data, len);
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
 if (kvm_is_error_hva(ghc->hva))
 return -EFAULT;